// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle all input from the IP layer into SCTP.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <[email protected]>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <[email protected]>
 *    Karl Knutson <[email protected]>
 *    Xingang Guo <[email protected]>
 *    Jon Grimm <[email protected]>
 *    Hui Huang <[email protected]>
 *    Daisy Chang <[email protected]>
 *    Sridhar Samudrala <[email protected]>
 *    Ardelle Fan <[email protected]>
 */

#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/time.h> /* For struct timeval */
#include <linux/slab.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/snmp.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>
#include <linux/rhashtable.h>
#include <net/sock_reuseport.h>

/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
                                      struct sk_buff *skb,
                                      const union sctp_addr *paddr,
                                      const union sctp_addr *laddr,
                                      struct sctp_transport **transportp);
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
                                      struct net *net, struct sk_buff *skb,
                                      const union sctp_addr *laddr,
                                      const union sctp_addr *daddr);
static struct sctp_association *__sctp_lookup_association(
                                      struct net *net,
                                      const union sctp_addr *local,
                                      const union sctp_addr *peer,
                                      struct sctp_transport **pt);
static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);

/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
{
        struct sctphdr *sh = sctp_hdr(skb);
        __le32 cmp = sh->checksum;
        __le32 val = sctp_compute_cksum(skb, 0);

        if (val != cmp) {
                /* CRC failure, dump it. */
                __SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
                return -1;
        }
        return 0;
}

/*
 * This is the routine which IP calls when receiving an SCTP packet.
 */
int sctp_rcv(struct sk_buff *skb)
{
        struct sock *sk;
        struct sctp_association *asoc;
        struct sctp_endpoint *ep = NULL;
        struct sctp_ep_common *rcvr;
        struct sctp_transport *transport = NULL;
        struct sctp_chunk *chunk;
        union sctp_addr src;
        union sctp_addr dest;
        int bound_dev_if;
        int family;
        struct sctp_af *af;
        struct net *net = dev_net(skb->dev);
        bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);

        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);

        /* If packet is too small to contain a single chunk, let's not
         * waste time on it anymore.
         */
        if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
                       skb_transport_offset(skb))
                goto discard_it;

        /* If the packet is fragmented and we need to do crc checking,
         * it's better to just linearize it otherwise crc computing
         * takes longer.
         */
        if ((!is_gso && skb_linearize(skb)) ||
            !pskb_may_pull(skb, sizeof(struct sctphdr)))
                goto discard_it;

        /* Pull up the IP header. */
        __skb_pull(skb, skb_transport_offset(skb));

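        /* Checksum handling: hardware may already have verified the
         * CRC32c (skb_csum_unnecessary), in which case one checksum
         * level is consumed below; software verification is skipped
         * for GSO frames and when sctp_checksum_disable is set.
         */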
        skb->csum_valid = 0; /* Previous value not applicable */
        if (skb_csum_unnecessary(skb))
                __skb_decr_checksum_unnecessary(skb);
        else if (!sctp_checksum_disable &&
                 !is_gso &&
                 sctp_rcv_checksum(net, skb) < 0)
                goto discard_it;
        skb->csum_valid = 1;

        __skb_pull(skb, sizeof(struct sctphdr));

        family = ipver2af(ip_hdr(skb)->version);
        af = sctp_get_af_specific(family);
        if (unlikely(!af))
                goto discard_it;
        SCTP_INPUT_CB(skb)->af = af;

        /* Initialize local addresses for lookups. */
        af->from_skb(&src, skb, 1);
        af->from_skb(&dest, skb, 0);

        /* If the packet is to or from a non-unicast address,
         * silently discard the packet.
         *
         * This is not clearly defined in the RFC except in section
         * 8.4 - OOTB handling. However, based on the book "Stream Control
         * Transmission Protocol" 2.1, "It is important to note that the
         * IP address of an SCTP transport address must be a routable
         * unicast address. In other words, IP multicast addresses and
         * IP broadcast addresses cannot be used in an SCTP transport
         * address."
         */
        if (!af->addr_valid(&src, NULL, skb) ||
            !af->addr_valid(&dest, NULL, skb))
                goto discard_it;

        asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
        if (!asoc)
                ep = __sctp_rcv_lookup_endpoint(net, skb, &dest, &src);

        /* Retrieve the common input handling substructure. */
        rcvr = asoc ? &asoc->base : &ep->base;
        sk = rcvr->sk;

        /*
         * If a frame arrives on an interface and the receiving socket is
         * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB.
         */
        bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
        if (bound_dev_if && (bound_dev_if != af->skb_iif(skb))) {
                if (transport) {
                        sctp_transport_put(transport);
                        asoc = NULL;
                        transport = NULL;
                } else {
                        sctp_endpoint_put(ep);
                        ep = NULL;
                }
                sk = net->sctp.ctl_sock;
                ep = sctp_sk(sk)->ep;
                sctp_endpoint_hold(ep);
                rcvr = &ep->base;
        }

        /*
         * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
         * An SCTP packet is called an "out of the blue" (OOTB)
         * packet if it is correctly formed, i.e., passed the
         * receiver's checksum check, but the receiver is not
         * able to identify the association to which this
         * packet belongs.
         */
        if (!asoc) {
                if (sctp_rcv_ootb(skb)) {
                        __SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
                        goto discard_release;
                }
        }

        if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
                goto discard_release;
        nf_reset_ct(skb);

        if (sk_filter(sk, skb))
                goto discard_release;

        /* Create an SCTP packet structure. */
        chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
        if (!chunk)
                goto discard_release;
        SCTP_INPUT_CB(skb)->chunk = chunk;

        /* Remember what endpoint is to handle this packet. */
        chunk->rcvr = rcvr;

        /* Remember the SCTP header. */
        chunk->sctp_hdr = sctp_hdr(skb);

        /* Set the source and destination addresses of the incoming chunk. */
        sctp_init_addrs(chunk, &src, &dest);

        /* Remember where we came from. */
        chunk->transport = transport;

        /* Acquire access to the sock lock. Note: We are safe from other
         * bottom halves on this lock, but a user may be in the lock too,
         * so check if it is busy.
         */
        bh_lock_sock(sk);

        if (sk != rcvr->sk) {
                /* Our cached sk is different from the rcvr->sk. This is
                 * because migrate()/accept() may have moved the association
                 * to a new socket and released all the sockets. So now we
                 * are holding a lock on the old socket while the user may
                 * be doing something with the new socket. Switch our view
                 * of the current sk.
                 */
                bh_unlock_sock(sk);
                sk = rcvr->sk;
                bh_lock_sock(sk);
        }

        if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
                if (sctp_add_backlog(sk, skb)) {
                        bh_unlock_sock(sk);
                        sctp_chunk_free(chunk);
                        skb = NULL; /* sctp_chunk_free already freed the skb */
                        goto discard_release;
                }
                __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
        } else {
                __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
        }

        bh_unlock_sock(sk);

        /* Release the asoc/ep ref we took in the lookup calls. */
        if (transport)
                sctp_transport_put(transport);
        else
                sctp_endpoint_put(ep);

        return 0;

discard_it:
        __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
        kfree_skb(skb);
        return 0;

discard_release:
        /* Release the asoc/ep ref we took in the lookup calls. */
        if (transport)
                sctp_transport_put(transport);
        else
                sctp_endpoint_put(ep);
        goto discard_it;
}

/* Process the backlog queue of the socket. Every skb on
 * the backlog holds a ref on an association or endpoint.
 * We hold this ref throughout the state machine to make
 * sure that the structure we need is still around.
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
        struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
        struct sctp_transport *t = chunk->transport;
        struct sctp_ep_common *rcvr = NULL;
        int backloged = 0;

        rcvr = chunk->rcvr;

        /* If the rcvr is dead then the association or endpoint
         * has been deleted and we can safely drop the chunk
         * and refs that we are holding.
         */
        if (rcvr->dead) {
                sctp_chunk_free(chunk);
                goto done;
        }

        if (unlikely(rcvr->sk != sk)) {
                /* In this case, the association moved from one socket to
                 * another. We are currently sitting on the backlog of the
                 * old socket, so we need to move.
                 * However, since we are here in process context we need
                 * to make sure that the user doesn't own the new socket
                 * when we process the packet.
                 * If the new socket is user-owned, queue the chunk to the
                 * backlog of the new socket without dropping any refs.
                 * Otherwise, we can safely push the chunk on the inqueue.
                 */
                sk = rcvr->sk;
                local_bh_disable();
                bh_lock_sock(sk);

                if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
                        if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                                sctp_chunk_free(chunk);
                        else
                                backloged = 1;
                } else
                        sctp_inq_push(inqueue, chunk);

                bh_unlock_sock(sk);
                local_bh_enable();

                /* If the chunk was backloged again, don't drop refs */
                if (backloged)
                        return 0;
        } else {
                if (!sctp_newsk_ready(sk)) {
                        if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                                return 0;
                        sctp_chunk_free(chunk);
                } else {
                        sctp_inq_push(inqueue, chunk);
                }
        }

done:
        /* Release the refs we took in sctp_add_backlog */
        if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
                sctp_transport_put(t);
        else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
                sctp_endpoint_put(sctp_ep(rcvr));
        else
                BUG();

        return 0;
}

static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
        struct sctp_transport *t = chunk->transport;
        struct sctp_ep_common *rcvr = chunk->rcvr;
        int ret;

        ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
        if (!ret) {
                /* Hold the assoc/ep while hanging on the backlog queue.
                 * This way, we know structures we need will not disappear
                 * from us.
                 */
                if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
                        sctp_transport_hold(t);
                else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
                        sctp_endpoint_hold(sctp_ep(rcvr));
                else
                        BUG();
        }
        return ret;
}

/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                           struct sctp_transport *t, __u32 pmtu)
{
        if (!t ||
            (t->pathmtu <= pmtu &&
             t->pl.probe_size + sctp_transport_pl_hlen(t) <= pmtu))
                return;

        if (sock_owned_by_user(sk)) {
                atomic_set(&t->mtu_info, pmtu);
                asoc->pmtu_pending = 1;
                t->pmtu_pending = 1;
                return;
        }

        if (!(t->param_flags & SPP_PMTUD_ENABLE))
                /* We can't allow retransmitting in such a case, as the
                 * retransmission would be sized just as before, and thus we
                 * would get another icmp, and retransmit again.
                 */
                return;

        /* Update the transport's view of the MTU. Return if no update was
         * needed. If an update wasn't needed/possible, it also doesn't make
         * sense to try to retransmit now.
         */
        if (!sctp_transport_update_pmtu(t, pmtu))
                return;

        /* Update association pmtu. */
        sctp_assoc_sync_pmtu(asoc);

        /* Retransmit with the new pmtu setting. */
        sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}

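/* Handle an ICMP redirect: when the socket is not user-owned, let the
 * transport's cached route update its next hop via dst->ops->redirect().
 */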
void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
                        struct sk_buff *skb)
{
        struct dst_entry *dst;

        if (sock_owned_by_user(sk) || !t)
                return;

        dst = sctp_transport_dst_check(t);
        if (dst)
                dst->ops->redirect(dst, sk, skb);
}

/*
 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 *
 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
 *        or a "Protocol Unreachable", treat this message as an abort
 *        with the T bit set.
 *
 * This function sends an event to the state machine, which will abort the
 * association.
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
                                 struct sctp_association *asoc,
                                 struct sctp_transport *t)
{
        if (sock_owned_by_user(sk)) {
                if (timer_pending(&t->proto_unreach_timer))
                        return;
                if (!mod_timer(&t->proto_unreach_timer,
                               jiffies + (HZ / 20)))
                        sctp_transport_hold(t);
        } else {
                struct net *net = sock_net(sk);

                pr_debug("%s: unrecognized next header type encountered!\n",
                         __func__);

                if (del_timer(&t->proto_unreach_timer))
                        sctp_transport_put(t);

                sctp_do_sm(net, SCTP_EVENT_T_OTHER,
                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
                           asoc->state, asoc->ep, asoc, t,
                           GFP_ATOMIC);
        }
}

/* Common lookup code for icmp/icmpv6 error handler. */
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
                             struct sctphdr *sctphdr,
                             struct sctp_association **app,
                             struct sctp_transport **tpp)
{
        struct sctp_init_chunk *chunkhdr, _chunkhdr;
        union sctp_addr saddr;
        union sctp_addr daddr;
        struct sctp_af *af;
        struct sock *sk = NULL;
        struct sctp_association *asoc;
        struct sctp_transport *transport = NULL;
        __u32 vtag = ntohl(sctphdr->vtag);

        *app = NULL; *tpp = NULL;

        af = sctp_get_af_specific(family);
        if (unlikely(!af))
                return NULL;

        /* Initialize local addresses for lookups. */
        af->from_skb(&saddr, skb, 1);
        af->from_skb(&daddr, skb, 0);

        /* Look for an association that matches the incoming ICMP error
         * packet.
         */
        asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
        if (!asoc)
                return NULL;

        sk = asoc->base.sk;

        /* RFC 4960, Appendix C. ICMP Handling
         *
         * ICMP6) An implementation MUST validate that the Verification Tag
         * contained in the ICMP message matches the Verification Tag of
         * the peer. If the Verification Tag is not 0 and does NOT
         * match, discard the ICMP message. If it is 0 and the ICMP
         * message contains enough bytes to verify that the chunk type is
         * an INIT chunk and that the Initiate Tag matches the tag of the
         * peer, continue with ICMP7. If the ICMP message is too short
         * or the chunk type or the Initiate Tag does not match, silently
         * discard the packet.
         */
        if (vtag == 0) {
                /* chunk header + first 4 octets of init header */
                chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
                                              sizeof(struct sctphdr),
                                              sizeof(struct sctp_chunkhdr) +
                                              sizeof(__be32), &_chunkhdr);
                if (!chunkhdr ||
                    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
                    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
                        goto out;
        } else if (vtag != asoc->c.peer_vtag) {
                goto out;
        }

        bh_lock_sock(sk);

        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

        *app = asoc;
        *tpp = transport;
        return sk;

out:
        sctp_transport_put(transport);
        return NULL;
}

/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
        __releases(&((__sk)->sk_lock.slock))
{
        bh_unlock_sock(sk);
        sctp_transport_put(t);
}

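/* Translate an ICMPv4 error for an established transport: FRAG_NEEDED
 * feeds PMTU discovery, protocol-unreachable aborts the association,
 * redirects update the cached route, and the remaining types become a
 * socket error (or only a soft error when it cannot be reported).
 */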
static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
                               __u8 type, __u8 code, __u32 info)
{
        struct sctp_association *asoc = t->asoc;
        struct sock *sk = asoc->base.sk;
        int err = 0;

        switch (type) {
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        return;
                if (code == ICMP_FRAG_NEEDED) {
                        sctp_icmp_frag_needed(sk, asoc, t, SCTP_TRUNC4(info));
                        return;
                }
                if (code == ICMP_PROT_UNREACH) {
                        sctp_icmp_proto_unreachable(sk, asoc, t);
                        return;
                }
                err = icmp_err_convert[code].errno;
                break;
        case ICMP_TIME_EXCEEDED:
                if (code == ICMP_EXC_FRAGTIME)
                        return;
                err = EHOSTUNREACH;
                break;
        case ICMP_REDIRECT:
                sctp_icmp_redirect(sk, t, skb);
                return;
        default:
                return;
        }

        if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
                sk->sk_err = err;
                sk_error_report(sk);
        } else { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment
 * header points to the first 8 bytes of the sctp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
int sctp_v4_err(struct sk_buff *skb, __u32 info)
{
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct net *net = dev_net(skb->dev);
        struct sctp_transport *transport;
        struct sctp_association *asoc;
        __u16 saveip, savesctp;
        struct sock *sk;

        /* Fix up skb to look at the embedded net header. */
        saveip = skb->network_header;
        savesctp = skb->transport_header;
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, iph->ihl * 4);
        sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
        /* Put back the original values. */
        skb->network_header = saveip;
        skb->transport_header = savesctp;
        if (!sk) {
                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return -ENOENT;
        }

        sctp_v4_err_handle(transport, skb, type, code, info);
        sctp_err_finish(sk, transport);

        return 0;
}

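/* ICMP error handler for UDP-encapsulated SCTP. The transport header is
 * temporarily advanced past the UDP header so that sctp_err_lookup()
 * sees the inner SCTP header; redirects are left to udp_err() since the
 * outer IP header is not known here.
 */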
int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct sctp_association *asoc;
        struct sctp_transport *t;
        struct icmphdr *hdr;
        __u32 info = 0;

        skb->transport_header += sizeof(struct udphdr);
        sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
        if (!sk) {
                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return -ENOENT;
        }
        skb->transport_header -= sizeof(struct udphdr);

        hdr = (struct icmphdr *)(skb_network_header(skb) - sizeof(struct icmphdr));
        if (hdr->type == ICMP_REDIRECT) {
                /* can't be handled without outer iphdr known, leave it to udp_err */
                sctp_err_finish(sk, t);
                return 0;
        }
        if (hdr->type == ICMP_DEST_UNREACH && hdr->code == ICMP_FRAG_NEEDED)
                info = ntohs(hdr->un.frag.mtu);
        sctp_v4_err_handle(t, skb, hdr->type, hdr->code, info);

        sctp_err_finish(sk, t);
        return 1;
}

/*
 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 *
 * This function scans all the chunks in the OOTB packet to determine if
 * the packet should be discarded right away. If a response might be needed
 * for this packet, or, if further processing is possible, the packet will
 * be queued to a proper inqueue for the next phase of handling.
 *
 * Output:
 * Return 0 - If further processing is needed.
 * Return 1 - If the packet can be discarded right away.
 */
static int sctp_rcv_ootb(struct sk_buff *skb)
{
        struct sctp_chunkhdr *ch, _ch;
        int ch_end, offset = 0;

        /* Scan through all the chunks in the packet. */
        do {
                /* Make sure we have at least the header there */
                if (offset + sizeof(_ch) > skb->len)
                        break;

                ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);

                /* Break out if chunk length is less than minimal. */
                if (!ch || ntohs(ch->length) < sizeof(_ch))
                        break;

                ch_end = offset + SCTP_PAD4(ntohs(ch->length));
                if (ch_end > skb->len)
                        break;

                /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
                 * receiver MUST silently discard the OOTB packet and take no
                 * further action.
                 */
                if (SCTP_CID_ABORT == ch->type)
                        goto discard;

                /* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
                 * chunk, the receiver should silently discard the packet
                 * and take no further action.
                 */
                if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
                        goto discard;

                /* RFC 4460, 2.11.2
                 * This will discard packets with INIT chunk bundled as
                 * subsequent chunks in the packet. When INIT is first,
                 * the normal INIT processing will discard the chunk.
                 */
                if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
                        goto discard;

                offset = ch_end;
        } while (ch_end < skb->len);

        return 0;

discard:
        return 1;
}

/* Insert endpoint into the hash table. */
static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
        struct sock *sk = ep->base.sk;
        struct net *net = sock_net(sk);
        struct sctp_hashbucket *head;

        ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
        head = &sctp_ep_hashtable[ep->hashent];

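        /* For SO_REUSEPORT sockets, join the reuseport group of an
         * existing endpoint in this bucket owned by the same user, or
         * allocate a new group; sctp_bind_addrs_check() decides whether
         * the two bind address lists are compatible.
         */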
        if (sk->sk_reuseport) {
                bool any = sctp_is_ep_boundall(sk);
                struct sctp_endpoint *ep2;
                struct list_head *list;
                int cnt = 0, err = 1;

                list_for_each(list, &ep->base.bind_addr.address_list)
                        cnt++;

                sctp_for_each_hentry(ep2, &head->chain) {
                        struct sock *sk2 = ep2->base.sk;

                        if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
                            !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
                            !sk2->sk_reuseport)
                                continue;

                        err = sctp_bind_addrs_check(sctp_sk(sk2),
                                                    sctp_sk(sk), cnt);
                        if (!err) {
                                err = reuseport_add_sock(sk, sk2, any);
                                if (err)
                                        return err;
                                break;
                        } else if (err < 0) {
                                return err;
                        }
                }

                if (err) {
                        err = reuseport_alloc(sk, any);
                        if (err)
                                return err;
                }
        }

        write_lock(&head->lock);
        hlist_add_head(&ep->node, &head->chain);
        write_unlock(&head->lock);
        return 0;
}

/* Add an endpoint to the hash. Local BH-safe. */
int sctp_hash_endpoint(struct sctp_endpoint *ep)
{
        int err;

        local_bh_disable();
        err = __sctp_hash_endpoint(ep);
        local_bh_enable();

        return err;
}

/* Remove endpoint from the hash table. */
static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
        struct sock *sk = ep->base.sk;
        struct sctp_hashbucket *head;

        ep->hashent = sctp_ep_hashfn(sock_net(sk), ep->base.bind_addr.port);
        head = &sctp_ep_hashtable[ep->hashent];

        if (rcu_access_pointer(sk->sk_reuseport_cb))
                reuseport_detach_sock(sk);

        write_lock(&head->lock);
        hlist_del_init(&ep->node);
        write_unlock(&head->lock);
}

/* Remove endpoint from the hash. Local BH-safe. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
        local_bh_disable();
        __sctp_unhash_endpoint(ep);
        local_bh_enable();
}

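/* Hash the (net, local port, peer address/port) tuple. The same function
 * backs both the transport rhashtable and reuseport socket selection.
 */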
static inline __u32 sctp_hashfn(const struct net *net, __be16 lport,
                                const union sctp_addr *paddr, __u32 seed)
{
        __u32 addr;

        if (paddr->sa.sa_family == AF_INET6)
                addr = jhash(&paddr->v6.sin6_addr, 16, seed);
        else
                addr = (__force __u32)paddr->v4.sin_addr.s_addr;

        return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
                            (__force __u32)lport, net_hash_mix(net), seed);
}

/* Look up an endpoint. */
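/* The search falls back to the control socket's endpoint when nothing
 * matches, so the caller always gets back a held endpoint; within a
 * reuseport group, one socket is selected by hashing the peer address.
 */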
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
                                        struct net *net, struct sk_buff *skb,
                                        const union sctp_addr *laddr,
                                        const union sctp_addr *paddr)
{
        struct sctp_hashbucket *head;
        struct sctp_endpoint *ep;
        struct sock *sk;
        __be16 lport;
        int hash;

        lport = laddr->v4.sin_port;
        hash = sctp_ep_hashfn(net, ntohs(lport));
        head = &sctp_ep_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(ep, &head->chain) {
                if (sctp_endpoint_is_match(ep, net, laddr))
                        goto hit;
        }

        ep = sctp_sk(net->sctp.ctl_sock)->ep;

hit:
        sk = ep->base.sk;
        if (sk->sk_reuseport) {
                __u32 phash = sctp_hashfn(net, lport, paddr, 0);

                sk = reuseport_select_sock(sk, phash, skb,
                                           sizeof(struct sctphdr));
                if (sk)
                        ep = sctp_sk(sk)->ep;
        }
        sctp_endpoint_hold(ep);
        read_unlock(&head->lock);
        return ep;
}

/* rhashtable for transport */
struct sctp_hash_cmp_arg {
        const union sctp_addr   *paddr;
        const struct net        *net;
        __be16                  lport;
};

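/* rhashtable compare: return 0 when the transport matches the
 * (paddr, net, lport) key. A temporary hold keeps t->asoc valid while
 * it is dereferenced.
 */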
static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
                                const void *ptr)
{
        struct sctp_transport *t = (struct sctp_transport *)ptr;
        const struct sctp_hash_cmp_arg *x = arg->key;
        int err = 1;

        if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
                return err;
        if (!sctp_transport_hold(t))
                return err;

        if (!net_eq(t->asoc->base.net, x->net))
                goto out;
        if (x->lport != htons(t->asoc->base.bind_addr.port))
                goto out;

        err = 0;
out:
        sctp_transport_put(t);
        return err;
}

static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
        const struct sctp_transport *t = data;

        return sctp_hashfn(t->asoc->base.net,
                           htons(t->asoc->base.bind_addr.port),
                           &t->ipaddr, seed);
}

static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
{
        const struct sctp_hash_cmp_arg *x = data;

        return sctp_hashfn(x->net, x->lport, x->paddr, seed);
}

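/* The key hash (sctp_hash_key) and object hash (sctp_hash_obj) must
 * agree: a lookup keyed by sctp_hash_cmp_arg has to land in the same
 * bucket as the transports that were hashed by object.
 */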
static const struct rhashtable_params sctp_hash_params = {
        .head_offset            = offsetof(struct sctp_transport, node),
        .hashfn                 = sctp_hash_key,
        .obj_hashfn             = sctp_hash_obj,
        .obj_cmpfn              = sctp_hash_cmp,
        .automatic_shrinking    = true,
};

int sctp_transport_hashtable_init(void)
{
        return rhltable_init(&sctp_transport_hashtable, &sctp_hash_params);
}

void sctp_transport_hashtable_destroy(void)
{
        rhltable_destroy(&sctp_transport_hashtable);
}

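/* Insert a transport into the hash table. Transports of temporary
 * associations are never hashed; a duplicate entry (same endpoint and
 * peer address) is rejected with -EEXIST.
 */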
int sctp_hash_transport(struct sctp_transport *t)
{
        struct sctp_transport *transport;
        struct rhlist_head *tmp, *list;
        struct sctp_hash_cmp_arg arg;
        int err;

        if (t->asoc->temp)
                return 0;

        arg.net   = t->asoc->base.net;
        arg.paddr = &t->ipaddr;
        arg.lport = htons(t->asoc->base.bind_addr.port);

        rcu_read_lock();
        list = rhltable_lookup(&sctp_transport_hashtable, &arg,
                               sctp_hash_params);

        rhl_for_each_entry_rcu(transport, tmp, list, node)
                if (transport->asoc->ep == t->asoc->ep) {
                        rcu_read_unlock();
                        return -EEXIST;
                }
        rcu_read_unlock();

        err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
                                  &t->node, sctp_hash_params);
        if (err)
                pr_err_once("insert transport fail, errno %d\n", err);

        return err;
}

void sctp_unhash_transport(struct sctp_transport *t)
{
        if (t->asoc->temp)
                return;

        rhltable_remove(&sctp_transport_hashtable, &t->node,
                        sctp_hash_params);
}

/* return a transport with holding it */
struct sctp_transport *sctp_addrs_lookup_transport(
                                struct net *net,
                                const union sctp_addr *laddr,
                                const union sctp_addr *paddr)
{
        struct rhlist_head *tmp, *list;
        struct sctp_transport *t;
        struct sctp_hash_cmp_arg arg = {
                .paddr = paddr,
                .net   = net,
                .lport = laddr->v4.sin_port,
        };

        list = rhltable_lookup(&sctp_transport_hashtable, &arg,
                               sctp_hash_params);

        rhl_for_each_entry_rcu(t, tmp, list, node) {
                if (!sctp_transport_hold(t))
                        continue;

                if (sctp_bind_addr_match(&t->asoc->base.bind_addr,
                                         laddr, sctp_sk(t->asoc->base.sk)))
                        return t;
                sctp_transport_put(t);
        }

        return NULL;
}

/* return a transport without holding it, as it's only used under sock lock */
struct sctp_transport *sctp_epaddr_lookup_transport(
                                const struct sctp_endpoint *ep,
                                const union sctp_addr *paddr)
{
        struct rhlist_head *tmp, *list;
        struct sctp_transport *t;
        struct sctp_hash_cmp_arg arg = {
                .paddr = paddr,
                .net   = ep->base.net,
                .lport = htons(ep->base.bind_addr.port),
        };

        list = rhltable_lookup(&sctp_transport_hashtable, &arg,
                               sctp_hash_params);

        rhl_for_each_entry_rcu(t, tmp, list, node)
                if (ep == t->asoc->ep)
                        return t;

        return NULL;
}

/* Look up an association. */
static struct sctp_association *__sctp_lookup_association(
                                        struct net *net,
                                        const union sctp_addr *local,
                                        const union sctp_addr *peer,
                                        struct sctp_transport **pt)
{
        struct sctp_transport *t;
        struct sctp_association *asoc = NULL;

        t = sctp_addrs_lookup_transport(net, local, peer);
        if (!t)
                goto out;

        asoc = t->asoc;
        *pt = t;

out:
        return asoc;
}

/* Look up an association. protected by RCU read lock */
static
struct sctp_association *sctp_lookup_association(struct net *net,
                                        const union sctp_addr *laddr,
                                        const union sctp_addr *paddr,
                                        struct sctp_transport **transportp)
{
        struct sctp_association *asoc;

        rcu_read_lock();
        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
        rcu_read_unlock();

        return asoc;
}

/* Is there an association matching the given local and peer addresses? */
bool sctp_has_association(struct net *net,
                          const union sctp_addr *laddr,
                          const union sctp_addr *paddr)
{
        struct sctp_transport *transport;

        if (sctp_lookup_association(net, laddr, paddr, &transport)) {
                sctp_transport_put(transport);
                return true;
        }

        return false;
}

/*
 * SCTP Implementer's Guide, 2.18 Handling of address
 * parameters within the INIT or INIT-ACK.
 *
 * D) When searching for a matching TCB upon reception of an INIT
 *    or INIT-ACK chunk the receiver SHOULD use not only the
 *    source address of the packet (containing the INIT or
 *    INIT-ACK) but the receiver SHOULD also use all valid
 *    address parameters contained within the chunk.
 *
 * 2.18.3 Solution description
 *
 * This new text clearly specifies to an implementor the need
 * to look within the INIT or INIT-ACK. Any implementation that
 * does not do this may not be able to establish associations
 * in certain circumstances.
 */
static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
        struct sk_buff *skb,
        const union sctp_addr *laddr, struct sctp_transport **transportp)
{
        struct sctp_association *asoc;
        union sctp_addr addr;
        union sctp_addr *paddr = &addr;
        struct sctphdr *sh = sctp_hdr(skb);
        union sctp_params params;
        struct sctp_init_chunk *init;
        struct sctp_af *af;

        /*
         * This code will NOT touch anything inside the chunk--it is
         * strictly READ-ONLY.
         *
         * RFC 2960 3 SCTP packet Format
         *
         * Multiple chunks can be bundled into one SCTP packet up to
         * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
         * COMPLETE chunks. These chunks MUST NOT be bundled with any
         * other chunk in a packet. See Section 6.10 for more details
         * on chunk bundling.
         */

        /* Find the start of the TLVs and the end of the chunk. This is
         * the region we search for address parameters.
         */
        init = (struct sctp_init_chunk *)skb->data;

        /* Walk the parameters looking for embedded addresses. */
        sctp_walk_params(params, init, init_hdr.params) {
                /* Note: Ignoring hostname addresses. */
                af = sctp_get_af_specific(param_type2af(params.p->type));
                if (!af)
                        continue;

                if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
                        continue;

                asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
                if (asoc)
                        return asoc;
        }

        return NULL;
}

/* ADD-IP, Section 5.2
 * When an endpoint receives an ASCONF Chunk from the remote peer
 * special procedures may be needed to identify the association the
 * ASCONF Chunk is associated with. To properly find the association
 * the following procedures SHOULD be followed:
 *
 * D2) If the association is not found, use the address found in the
 * Address Parameter TLV combined with the port number found in the
 * SCTP common header. If found proceed to rule D4.
 *
 * D2-ext) If more than one ASCONF Chunks are packed together, use the
 * address found in the ASCONF Address Parameter TLV of each of the
 * subsequent ASCONF Chunks. If found, proceed to rule D4.
 */
static struct sctp_association *__sctp_rcv_asconf_lookup(
                                        struct net *net,
                                        struct sctp_chunkhdr *ch,
                                        const union sctp_addr *laddr,
                                        __be16 peer_port,
                                        struct sctp_transport **transportp)
{
        struct sctp_addip_chunk *asconf = (struct sctp_addip_chunk *)ch;
        struct sctp_af *af;
        union sctp_addr_param *param;
        union sctp_addr paddr;

        if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr))
                return NULL;

        /* Skip over the ADDIP header and find the Address parameter */
        param = (union sctp_addr_param *)(asconf + 1);

        af = sctp_get_af_specific(param_type2af(param->p.type));
        if (unlikely(!af))
                return NULL;

        if (!af->from_addr_param(&paddr, param, peer_port, 0))
                return NULL;

        return __sctp_lookup_association(net, laddr, &paddr, transportp);
}

/* SCTP-AUTH, Section 6.3:
 * If the receiver does not find a STCB for a packet containing an AUTH
 * chunk as the first chunk and not a COOKIE-ECHO chunk as the second
 * chunk, it MUST use the chunks after the AUTH chunk to look up an existing
 * association.
 *
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
                                        struct sk_buff *skb,
                                        const union sctp_addr *laddr,
                                        struct sctp_transport **transportp)
{
        struct sctp_association *asoc = NULL;
        struct sctp_chunkhdr *ch;
        int have_auth = 0;
        unsigned int chunk_num = 1;
        __u8 *ch_end;

        /* Walk through the chunks looking for AUTH or ASCONF chunks
         * to help us find the association.
         */
        ch = (struct sctp_chunkhdr *)skb->data;
        do {
                /* Break out if chunk length is less than minimal. */
                if (ntohs(ch->length) < sizeof(*ch))
                        break;

                ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
                if (ch_end > skb_tail_pointer(skb))
                        break;

                switch (ch->type) {
                case SCTP_CID_AUTH:
                        have_auth = chunk_num;
                        break;

                case SCTP_CID_COOKIE_ECHO:
                        /* If a packet arrives containing an AUTH chunk as
                         * a first chunk, a COOKIE-ECHO chunk as the second
                         * chunk, and possibly more chunks after them, and
                         * the receiver does not have an STCB for that
                         * packet, then authentication is based on
                         * the contents of the COOKIE-ECHO chunk.
                         */
                        if (have_auth == 1 && chunk_num == 2)
                                return NULL;
                        break;

                case SCTP_CID_ASCONF:
                        if (have_auth || net->sctp.addip_noauth)
                                asoc = __sctp_rcv_asconf_lookup(
                                                net, ch, laddr,
                                                sctp_hdr(skb)->source,
                                                transportp);
                        break;

                default:
                        break;
                }

                if (asoc)
                        break;

                ch = (struct sctp_chunkhdr *)ch_end;
                chunk_num++;
        } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));

        return asoc;
}

/*
 * There are circumstances when we need to look inside the SCTP packet
 * for information to help us find the association. Examples
 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
 * chunks.
 */
static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
                                        struct sk_buff *skb,
                                        const union sctp_addr *laddr,
                                        struct sctp_transport **transportp)
{
        struct sctp_chunkhdr *ch;

        /* We do not allow GSO frames here as we need to linearize and
         * then cannot guarantee frame boundaries. This shouldn't be an
         * issue as packets hitting this are mostly INIT or INIT-ACK and
         * those cannot arrive as GSO frames anyway.
         */
        if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
                return NULL;

        ch = (struct sctp_chunkhdr *)skb->data;

        /* The code below will attempt to walk the chunk and extract
         * parameter information. Before we do that, we need to verify
         * that the chunk length doesn't cause overflow. Otherwise, we'll
         * walk off the end.
         */
        if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
                return NULL;

        /* If this is INIT/INIT-ACK look inside the chunk too. */
        if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK)
                return __sctp_rcv_init_lookup(net, skb, laddr, transportp);

        return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
}

/* Lookup an association for an inbound skb. */
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
                                        struct sk_buff *skb,
                                        const union sctp_addr *paddr,
                                        const union sctp_addr *laddr,
                                        struct sctp_transport **transportp)
{
        struct sctp_association *asoc;

        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
        if (asoc)
                goto out;

        /* Further lookup for INIT/INIT-ACK packets.
         * SCTP Implementer's Guide, 2.18 Handling of address
         * parameters within the INIT or INIT-ACK.
         */
        asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
        if (asoc)
                goto out;

        if (paddr->sa.sa_family == AF_INET)
                pr_debug("sctp: asoc not found for src:%pI4:%d dst:%pI4:%d\n",
                         &laddr->v4.sin_addr, ntohs(laddr->v4.sin_port),
                         &paddr->v4.sin_addr, ntohs(paddr->v4.sin_port));
        else
                pr_debug("sctp: asoc not found for src:%pI6:%d dst:%pI6:%d\n",
                         &laddr->v6.sin6_addr, ntohs(laddr->v6.sin6_port),
                         &paddr->v6.sin6_addr, ntohs(paddr->v6.sin6_port));

out:
        return asoc;
}