// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>

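/* Lazily install the net-wide Fast Open key: if no context is present yet,
 * generate a random key and publish it. Only the first call does any work;
 * later calls see the existing context and return.
 */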
void tcp_fastopen_init_key_once(struct net *net)
{
        u8 key[TCP_FASTOPEN_KEY_LENGTH];
        struct tcp_fastopen_context *ctxt;

        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        get_random_bytes(key, sizeof(key));
        tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

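/* RCU callback: use kfree_sensitive() so the key material is zeroed
 * before the memory is returned to the allocator.
 */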
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        kfree_sensitive(ctx);
}

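/* Tear down the per-listener Fast Open context when the socket is
 * destroyed; the actual free is deferred past an RCU grace period since
 * readers dereference the context under rcu_read_lock().
 */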
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
        struct tcp_fastopen_context *ctx;

        ctx = rcu_dereference_protected(
                        inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
        if (ctx)
                call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

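/* Likewise for the net-namespace-wide context at netns teardown; xchg()
 * detaches the pointer atomically before the deferred free.
 */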
void tcp_fastopen_ctx_destroy(struct net *net)
{
        struct tcp_fastopen_context *ctxt;

        ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);

        if (ctxt)
                call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

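/* Install a new primary (and optional backup) SipHash key. With @sk the
 * per-listener context is replaced, otherwise the net-wide one; the old
 * context is freed after an RCU grace period. Reached e.g. via the
 * TCP_FASTOPEN_KEY setsockopt and the net.ipv4.tcp_fastopen_key sysctl.
 */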
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
                              void *primary_key, void *backup_key)
{
        struct tcp_fastopen_context *ctx, *octx;
        struct fastopen_queue *q;
        int err = 0;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
                goto out;
        }

        ctx->key[0].key[0] = get_unaligned_le64(primary_key);
        ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
        if (backup_key) {
                ctx->key[1].key[0] = get_unaligned_le64(backup_key);
                ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
                ctx->num = 2;
        } else {
                ctx->num = 1;
        }

        if (sk) {
                q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
                octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
        } else {
                octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
        }

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
        return err;
}

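/* Copy the installed key(s) out to @key as little-endian u64 pairs and
 * return how many keys were present; the read-side counterpart of
 * tcp_fastopen_reset_cipher(), used to report keys back to user space.
 */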
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
                            u64 *key)
{
        struct tcp_fastopen_context *ctx;
        int n_keys = 0, i;

        rcu_read_lock();
        if (icsk)
                ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
        else
                ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctx) {
                n_keys = tcp_fastopen_context_len(ctx);
                for (i = 0; i < n_keys; i++) {
                        put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
                        put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
                }
        }
        rcu_read_unlock();

        return n_keys;
}

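/* Compute the 8-byte cookie as siphash(saddr, daddr) under @key. A single
 * siphash() call covers both addresses because saddr and daddr are adjacent
 * in the IPv4 and IPv6 headers.
 */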
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
                                             struct sk_buff *syn,
                                             const siphash_key_t *key,
                                             struct tcp_fastopen_cookie *foc)
{
        BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
                                          sizeof(iph->saddr) +
                                          sizeof(iph->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
                                          sizeof(ip6h->saddr) +
                                          sizeof(ip6h->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#endif
        return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
                                    struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (ctx)
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
        rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        skb_dst_drop(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting. Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen. Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);

        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) not needed here,
         * as we certainly are not changing upper 32bit value (0)
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
                                         struct request_sock *req,
                                         struct sk_buff *syn,
                                         struct tcp_fastopen_cookie *orig,
                                         struct tcp_fastopen_cookie *valid_foc)
{
        struct tcp_fastopen_cookie search_foc = { .len = -1 };
        struct tcp_fastopen_cookie *foc = valid_foc;
        struct tcp_fastopen_context *ctx;
        int i, ret = 0;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (!ctx)
                goto out;
        for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
                if (tcp_fastopen_cookie_match(foc, orig)) {
                        ret = i + 1;
                        goto out;
                }
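                /* Only the primary-key cookie is kept in *valid_foc so it
                 * can be echoed back to the client; subsequent keys are
                 * hashed into a scratch cookie.
                 */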
                foc = &search_foc;
        }
out:
        rcu_read_unlock();
        return ret;
}

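/* Build a fully functional child socket directly from the SYN, before the
 * 3WHS completes, so the SYN payload can be received right away. The child
 * is charged against the listener's fastopen queue and holds on to the
 * request socket until the final ACK arrives.
 */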
static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);

        /* Initialize the child socket. Have to fix some values to take
         * into account the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        rcu_assign_pointer(tp->fastopen_rsk, req);
        tcp_rsk(req)->tfo_listener = true;

        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        req->timeout = tcp_timeout_init(child);
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  req->timeout, TCP_RTO_MAX);

        refcount_set(&req->rsk_refcnt, 2);

        /* Now finish processing the fastopen child socket. */
        tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
        return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;
        int max_qlen;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        max_qlen = READ_ONCE(fastopenq->max_qlen);
        if (max_qlen == 0)
                return false;

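        /* Queue is full: try to reap one pending request that the client
         * has already reset and whose timer has expired; otherwise refuse
         * Fast Open for this SYN.
         */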
        if (fastopenq->qlen >= max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}

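/* True if the cookie requirement can be waived for this connection: the
 * @flag bit is set in the tcp_fastopen sysctl, the socket opted out via
 * TCP_FASTOPEN_NO_COOKIE, or the route carries the fastopen_no_cookie
 * metric.
 */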
static bool tcp_fastopen_no_cookie(const struct sock *sk,
                                   const struct dst_entry *dst,
                                   int flag)
{
        return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
               tcp_sk(sk)->fastopen_no_cookie ||
               (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              const struct dst_entry *dst)
{
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        struct sock *child;
        int ret = 0;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

        if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len == 0) {
                /* Client requests a cookie. */
                tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
        } else if (foc->len > 0) {
                ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
                                                    &valid_foc);
                if (!ret) {
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                } else {
                        /* Cookie is valid. Create a (full) child socket to
                         * accept the data in SYN before returning a SYN-ACK to
                         * ack the data. If we fail to create the socket, fall
                         * back and ack the ISN only, but include the same
                         * cookie.
                         *
                         * Note: Data-less SYN with valid cookie is allowed to
                         * send data in SYN_RECV state.
                         */
fastopen:
                        child = tcp_fastopen_create_child(sk, skb, req);
                        if (child) {
                                /* ret == 2 means the backup key matched:
                                 * echo a fresh primary-key cookie so the
                                 * client can rotate to it.
                                 */
                                if (ret == 2) {
                                        valid_foc.exp = foc->exp;
                                        *foc = valid_foc;
                                        NET_INC_STATS(sock_net(sk),
                                                      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
                                } else {
                                        foc->len = -1;
                                }
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPFASTOPENPASSIVE);
                                return child;
                        }
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                }
        }
        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}

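/* Client side: decide whether this connection may carry data in the SYN.
 * Returns true if a usable cookie was found in the metrics cache (or no
 * cookie is required), false if we must fall back to a regular handshake.
 */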
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        const struct dst_entry *dst;

        tcp_fastopen_cache_get(sk, mss, cookie);

        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        dst = __sk_dst_get(sk);

        if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
                cookie->len = -1;
                return true;
        }
        if (cookie->len > 0)
                return true;
        tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
        return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_sk(sk)->defer_connect = 1;
                        return true;
                }

                /* Alloc fastopen_req in order for FO option to be included
                 * in SYN
                 */
                tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                                           sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);

        if (!READ_ONCE(net->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
                return;

        /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
        WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

        /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
         * We want net->ipv4.tfo_active_disable_stamp to be updated first.
         */
        smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);

        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        unsigned int tfo_bh_timeout =
                READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
        unsigned long timeout;
        int tfo_da_times;
        int multiplier;

        if (!tfo_bh_timeout)
                return false;

        tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        if (!tfo_da_times)
                return false;

        /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
        smp_rmb();

        /* Limit timeout to max: 2^6 * initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);

        /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
        timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
                  multiplier * tfo_bh_timeout * HZ;
        if (time_before(jiffies, timeout))
                return true;

        /* Mark check bit so we can check for successful active TFO
         * condition and reset tfo_active_disable_times
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst;
        struct sk_buff *skb;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                skb = skb_rb_first(&tp->out_of_order_queue);
                if (skb && !skb_rb_next(skb)) {
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
                dst = sk_dst_get(sk);
                if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
                        atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
                dst_release(dst);
        }
}

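/* Called on retransmission timeouts: if a connection that used (or tried
 * to use) TFO keeps timing out, assume a middlebox blackhole and pause
 * active TFO globally.
 */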
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
        u32 timeouts = inet_csk(sk)->icsk_retransmits;
        struct tcp_sock *tp = tcp_sk(sk);

        /* Broken middle-boxes may black-hole Fast Open connections during or
         * even after the handshake. Be extremely conservative and pause
         * Fast Open globally after hitting the third consecutive timeout or
         * exceeding the configured timeout limit.
         */
        if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
            (timeouts == 2 || (timeouts < 2 && expired))) {
                tcp_fastopen_active_disable(sk);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
        }
}