tcp: TCP Fast Open Server - support TFO listeners
This patch builds on top of the previous patch to add support for TFO listeners. This includes:

1. allocating, properly initializing, and managing the per-listener
   fastopen_queue structure when TFO is enabled;
2. changes to the inet_csk_accept code to support TFO, e.g., the
   request_sock can no longer be freed upon accept(), not until the
   3WHS finishes;
3. allowing a TCP_SYN_RECV socket to properly poll() and sendmsg()
   if it's a TFO socket;
4. properly closing a TFO listener, and a TFO socket before the 3WHS
   finishes;
5. supporting the TCP_FASTOPEN socket option;
6. modifying tcp_check_req() to check a TFO socket as well as a
   request_sock;
7. supporting TCP's TFO cookie option;
8. adding a new SYN-ACK retransmit handler that uses the timer
   directly off the TFO socket rather than the listener socket. Note
   that the TFO server side will not retransmit anything other than
   the SYN-ACK until the 3WHS is completed.

The patch also contains an important function, "reqsk_fastopen_remove()",
to manage the somewhat complex relation between a listener, its
request_sock, and the corresponding child socket. See the comment above
the function for details.

Signed-off-by: H.K. Jerry Chu <hkchu@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8336886f78
parent 1046716368
committed by David S. Miller
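Before the diff, a brief usage note for items 3 and 5 of the commit message: a minimal userspace sketch (not part of the patch) of a server that enables TFO through the TCP_FASTOPEN socket option. The port number, queue length, and echo loop are arbitrary illustrative choices, and the sketch assumes a kernel with this series applied and server-side TFO enabled via the net.ipv4.tcp_fastopen sysctl.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23			/* Linux value; older headers may lack it */
#endif

int main(void)
{
	int qlen = 16;			/* cap on pending TFO requests on this listener */
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(8080);	/* arbitrary example port */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	/* Item 5 above: TCP_FASTOPEN on the listener arms the fastopen queue. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
		perror("setsockopt(TCP_FASTOPEN)");

	if (listen(fd, 128) < 0) {
		perror("listen");
		return 1;
	}

	for (;;) {
		/* With TFO, the accepted child may still be in SYN-RECV and
		 * already carry the data that arrived in the SYN (item 3). */
		int c = accept(fd, NULL, NULL);
		char buf[1024];
		ssize_t n;

		if (c < 0)
			continue;
		n = read(c, buf, sizeof(buf));
		if (n > 0)
			write(c, buf, n);	/* echo before the 3WHS completes */
		close(c);
	}
}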
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -149,6 +149,11 @@ void inet_sock_destruct(struct sock *sk)
 		pr_err("Attempt to release alive inet socket %p\n", sk);
 		return;
 	}
+	if (sk->sk_type == SOCK_STREAM) {
+		struct fastopen_queue *fastopenq =
+			inet_csk(sk)->icsk_accept_queue.fastopenq;
+		kfree(fastopenq);
+	}
 
 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
 	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
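The kfree() added above releases the per-listener fastopen_queue described in item 1 of the commit message. As a rough illustration of that ownership, here is a hedged standalone sketch; struct fastopenq_stub and both helpers are invented names used only for illustration, while the real structure and fastopen_init_queue() live in the kernel headers touched by this series.

#include <stdlib.h>

/* Simplified stand-in; the real struct fastopen_queue is defined in
 * include/net/request_sock.h by this patch series. */
struct fastopenq_stub {
	int max_qlen;	/* cap on pending TFO requests (sockopt arg or backlog) */
	int qlen;	/* TFO children still waiting for their 3WHS */
};

/* Mirrors fastopen_init_queue(): allocated once, when TFO listening starts. */
static struct fastopenq_stub *fastopenq_alloc(int backlog)
{
	struct fastopenq_stub *q = calloc(1, sizeof(*q));

	if (q)
		q->max_qlen = backlog;
	return q;
}

/* Mirrors the kfree() added to inet_sock_destruct() above: the queue is
 * owned by the listener and released only when that socket is destroyed. */
static void fastopenq_free(struct fastopenq_stub *q)
{
	free(q);
}

int main(void)
{
	struct fastopenq_stub *q = fastopenq_alloc(16);	/* e.g. at listen() time */

	/* ... pending TFO requests would be tracked here ... */
	fastopenq_free(q);				/* at listener destruction */
	return 0;
}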
@@ -212,6 +217,26 @@ int inet_listen(struct socket *sock, int backlog)
 	 * we can only allow the backlog to be adjusted.
 	 */
 	if (old_state != TCP_LISTEN) {
+		/* Check special setups for testing purpose to enable TFO w/o
+		 * requiring TCP_FASTOPEN sockopt.
+		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
+		 * Also fastopenq may already been allocated because this
+		 * socket was in TCP_LISTEN state previously but was
+		 * shutdown() (rather than close()).
+		 */
+		if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+		    inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) {
+			if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
+				err = fastopen_init_queue(sk, backlog);
+			else if ((sysctl_tcp_fastopen &
+				  TFO_SERVER_WO_SOCKOPT2) != 0)
+				err = fastopen_init_queue(sk,
+				    ((uint)sysctl_tcp_fastopen) >> 16);
+			else
+				err = 0;
+			if (err)
+				goto out;
+		}
 		err = inet_csk_listen_start(sk, backlog);
 		if (err)
 			goto out;
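The nested conditionals above are a testing shortcut: server-side TFO can be armed from the tcp_fastopen sysctl alone, without the TCP_FASTOPEN sockopt, with the queue length taken either from the listen() backlog (TFO_SERVER_WO_SOCKOPT1) or from the sysctl's upper 16 bits (TFO_SERVER_WO_SOCKOPT2). The standalone sketch below mirrors that decision tree; the flag values and the helper names tfo_listen_setup()/fastopen_init_queue_stub() are assumptions made for illustration, the authoritative definitions being in the kernel's include/net/tcp.h.

#include <stdio.h>

/* Illustrative values only; see include/net/tcp.h for the real flags. */
#define TFO_SERVER_ENABLE	0x2	/* assumed: server-side TFO allowed */
#define TFO_SERVER_WO_SOCKOPT1	0x400	/* assumed: enable w/o sockopt, use listen() backlog */
#define TFO_SERVER_WO_SOCKOPT2	0x800	/* assumed: enable w/o sockopt, backlog in bits 31..16 */

static int fastopen_init_queue_stub(int backlog)
{
	printf("fastopen queue sized for %d pending TFO requests\n", backlog);
	return 0;
}

static int tfo_listen_setup(unsigned int sysctl_tcp_fastopen, int backlog)
{
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0)
		return 0;	/* server-side TFO disabled: nothing to set up */

	if (sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1)
		/* Testing shortcut 1: reuse the listen() backlog. */
		return fastopen_init_queue_stub(backlog);

	if (sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT2)
		/* Testing shortcut 2: the backlog is packed into the sysctl's
		 * upper 16 bits, mirroring ((uint)sysctl_tcp_fastopen) >> 16. */
		return fastopen_init_queue_stub(sysctl_tcp_fastopen >> 16);

	/* Otherwise the application must opt in via TCP_FASTOPEN. */
	return 0;
}

int main(void)
{
	/* e.g. 0x20802 = server enable + WO_SOCKOPT2 + a backlog of 2 */
	return tfo_listen_setup(0x20802, 128);
}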
@@ -701,7 +726,8 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 
 		sock_rps_record_flow(sk2);
 		WARN_ON(!((1 << sk2->sk_state) &
-			  (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+			  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+			  TCPF_CLOSE_WAIT | TCPF_CLOSE)));
 
 		sock_graft(sk2, newsock);
 
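The extra TCPF_SYN_RECV above is what allows accept() to hand out a TFO child before its 3WHS completes. As a hedged illustration, and assuming only the standard Linux TCP_INFO getsockopt() interface (not something this patch adds), a caller could observe that state on the accepted socket like this:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

/* Call on the fd returned by accept(); with a TFO listener the child can
 * legitimately still be in SYN-RECV, which is exactly what the widened
 * WARN_ON above now tolerates.  Sketch only, not part of the patch. */
void report_child_state(int child_fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(child_fd, IPPROTO_TCP, TCP_INFO, &info, &len) != 0)
		return;

	if (info.tcpi_state == TCP_SYN_RECV)
		printf("TFO child handed out before the 3WHS completed\n");
	else if (info.tcpi_state == TCP_ESTABLISHED)
		printf("child already fully established\n");
}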