Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Conflicts:
	net/netfilter/core.c
	net/netfilter/nf_tables_netdev.c

Resolve two conflicts before pull request for David's net-next tree:

1) Between c73c248490 ("netfilter: nf_tables_netdev: remove redundant
   ip_hdr assignment") from the net tree and commit ddc8b6027a
   ("netfilter: introduce nft_set_pktinfo_{ipv4, ipv6}_validate()").

2) Between e8bffe0cf9 ("net: Add _nf_(un)register_hooks symbols") and
   Aaron Conole's patches to replace list_head with single linked list.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
@@ -289,6 +289,7 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
static void tcp_sndbuf_expand(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
int sndmem, per_mss;
u32 nr_segs;

@@ -309,7 +310,8 @@ static void tcp_sndbuf_expand(struct sock *sk)
* Cubic needs 1.7 factor, rounded to 2 to include
* extra cushion (application might react slowly to POLLOUT)
*/
sndmem = 2 * nr_segs * per_mss;
sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
sndmem *= nr_segs * per_mss;

if (sk->sk_sndbuf < sndmem)
sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
@@ -899,12 +901,29 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

/* Sum the number of packets on the wire we have marked as lost.
* There are two cases we care about here:
* a) Packet hasn't been marked lost (nor retransmitted),
* and this is the first loss.
* b) Packet has been marked both lost and retransmitted,
* and this means we think it was lost again.
*/
static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
__u8 sacked = TCP_SKB_CB(skb)->sacked;

if (!(sacked & TCPCB_LOST) ||
((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
tp->lost += tcp_skb_pcount(skb);
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tcp_verify_retransmit_hint(tp, skb);

tp->lost_out += tcp_skb_pcount(skb);
tcp_sum_lost(tp, skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
}
}
@@ -913,6 +932,7 @@ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
tcp_verify_retransmit_hint(tp, skb);

tcp_sum_lost(tp, skb);
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
@@ -1094,6 +1114,7 @@ struct tcp_sacktag_state {
*/
struct skb_mstamp first_sackt;
struct skb_mstamp last_sackt;
struct rate_sample *rate;
int flag;
};

@@ -1261,6 +1282,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
&skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);

if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
@@ -1311,6 +1333,9 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_advance_highest_sack(sk, skb);

tcp_skb_collapse_tstamp(prev, skb);
if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;

tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);

@@ -1540,6 +1565,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
dup_sack,
tcp_skb_pcount(skb),
&skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);

if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
@@ -1622,8 +1648,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,

found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
num_sacks, prior_snd_una);
if (found_dup_sack)
if (found_dup_sack) {
state->flag |= FLAG_DSACKING_ACK;
tp->delivered++; /* A spurious retransmission is delivered */
}

/* Eliminate too old ACKs, but take into
* account more or less fresh ones, they can
@@ -1890,6 +1918,7 @@ void tcp_enter_loss(struct sock *sk)
struct sk_buff *skb;
bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
bool is_reneg; /* is receiver reneging on SACKs? */
bool mark_lost;

/* Reduce ssthresh if it has not yet been made inside this window. */
if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1923,8 +1952,12 @@ void tcp_enter_loss(struct sock *sk)
if (skb == tcp_send_head(sk))
break;

mark_lost = (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
is_reneg);
if (mark_lost)
tcp_sum_lost(tp, skb);
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) {
if (mark_lost) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
@@ -2503,6 +2536,9 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);

if (inet_csk(sk)->icsk_ca_ops->cong_control)
return;

/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
(tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
@@ -2879,67 +2915,13 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
*rexmit = REXMIT_LOST;
}

/* Kathleen Nichols' algorithm for tracking the minimum value of
* a data stream over some fixed time interval. (E.g., the minimum
* RTT over the past five minutes.) It uses constant space and constant
* time per update yet almost always delivers the same minimum as an
* implementation that has to keep all the data in the window.
*
* The algorithm keeps track of the best, 2nd best & 3rd best min
* values, maintaining an invariant that the measurement time of the
* n'th best >= n-1'th best. It also makes sure that the three values
* are widely separated in the time window since that bounds the worse
* case error when that data is monotonically increasing over the window.
*
* Upon getting a new min, we can forget everything earlier because it
* has no value - the new min is <= everything else in the window by
* definition and it's the most recent. So we restart fresh on every new min
* and overwrites 2nd & 3rd choices. The same property holds for 2nd & 3rd
* best.
*/
static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
{
const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
struct rtt_meas *m = tcp_sk(sk)->rtt_min;
struct rtt_meas rttm = {
.rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
.ts = now,
};
u32 elapsed;
struct tcp_sock *tp = tcp_sk(sk);
u32 wlen = sysctl_tcp_min_rtt_wlen * HZ;

/* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
if (unlikely(rttm.rtt <= m[0].rtt))
m[0] = m[1] = m[2] = rttm;
else if (rttm.rtt <= m[1].rtt)
m[1] = m[2] = rttm;
else if (rttm.rtt <= m[2].rtt)
m[2] = rttm;

elapsed = now - m[0].ts;
if (unlikely(elapsed > wlen)) {
/* Passed entire window without a new min so make 2nd choice
* the new min & 3rd choice the new 2nd. So forth and so on.
*/
m[0] = m[1];
m[1] = m[2];
m[2] = rttm;
if (now - m[0].ts > wlen) {
m[0] = m[1];
m[1] = rttm;
if (now - m[0].ts > wlen)
m[0] = rttm;
}
} else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
/* Passed a quarter of the window without a new min so
* take 2nd choice from the 2nd quarter of the window.
*/
m[2] = m[1] = rttm;
} else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
/* Passed half the window without a new min so take the 3rd
* choice from the last half of the window.
*/
m[2] = rttm;
}
minmax_running_min(&tp->rtt_min, wlen, tcp_time_stamp,
rtt_us ? : jiffies_to_usecs(1));
}

static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
@@ -3102,10 +3084,11 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
u32 prior_snd_una, int *acked,
struct tcp_sacktag_state *sack)
struct tcp_sacktag_state *sack,
struct skb_mstamp *now)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct skb_mstamp first_ackt, last_ackt, now;
struct skb_mstamp first_ackt, last_ackt;
struct tcp_sock *tp = tcp_sk(sk);
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
@@ -3137,7 +3120,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
acked_pcount = tcp_tso_acked(sk, skb);
if (!acked_pcount)
break;

fully_acked = false;
} else {
/* Speedup tcp_unlink_write_queue() and next loop */
@@ -3173,6 +3155,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,

tp->packets_out -= acked_pcount;
pkts_acked += acked_pcount;
tcp_rate_skb_delivered(sk, skb, sack->rate);

/* Initial outgoing SYN's get put onto the write_queue
* just like anything else we transmit. It is not
@@ -3205,16 +3188,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;

skb_mstamp_get(&now);
if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
}
if (sack->first_sackt.v64) {
sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
}

sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
ca_rtt_us);

@@ -3242,7 +3224,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);

} else if (skb && rtt_update && sack_rtt_us >= 0 &&
sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3333,8 +3315,15 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
* information. All transmission or retransmission are delayed afterwards.
*/
static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
int flag)
int flag, const struct rate_sample *rs)
{
const struct inet_connection_sock *icsk = inet_csk(sk);

if (icsk->icsk_ca_ops->cong_control) {
icsk->icsk_ca_ops->cong_control(sk, rs);
return;
}

if (tcp_in_cwnd_reduction(sk)) {
/* Reduce cwnd if state mandates */
tcp_cwnd_reduction(sk, acked_sacked, flag);
@@ -3579,17 +3568,21 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_sacktag_state sack_state;
struct rate_sample rs = { .prior_delivered = 0 };
u32 prior_snd_una = tp->snd_una;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
u32 prior_fackets;
int prior_packets = tp->packets_out;
u32 prior_delivered = tp->delivered;
u32 delivered = tp->delivered;
u32 lost = tp->lost;
int acked = 0; /* Number of packets newly acked */
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
struct skb_mstamp now;

sack_state.first_sackt.v64 = 0;
sack_state.rate = &rs;

/* We very likely will need to access write queue head. */
prefetchw(sk->sk_write_queue.next);
@@ -3612,6 +3605,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (after(ack, tp->snd_nxt))
goto invalid_ack;

skb_mstamp_get(&now);

if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
tcp_rearm_rto(sk);
@@ -3622,6 +3617,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
}

prior_fackets = tp->fackets_out;
rs.prior_in_flight = tcp_packets_in_flight(tp);

/* ts_recent update must be made after we are sure that the packet
* is in window.
@@ -3677,7 +3673,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)

/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
&sack_state);
&sack_state, &now);

if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@@ -3694,7 +3690,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)

if (icsk->icsk_pending == ICSK_TIME_RETRANS)
tcp_schedule_loss_probe(sk);
tcp_cong_control(sk, ack, tp->delivered - prior_delivered, flag);
delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
lost = tp->lost - lost; /* freshly marked lost */
tcp_rate_gen(sk, delivered, lost, &now, &rs);
tcp_cong_control(sk, ack, delivered, flag, &rs);
tcp_xmit_recovery(sk, rexmit);
return 1;

@@ -4108,7 +4107,7 @@ void tcp_fin(struct sock *sk)
/* It _is_ possible, that we have something out-of-order _after_ FIN.
* Probably, we should reset in this case. For now drop them.
*/
__skb_queue_purge(&tp->out_of_order_queue);
skb_rbtree_purge(&tp->out_of_order_queue);
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
@@ -4268,7 +4267,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
int this_sack;

/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
if (skb_queue_empty(&tp->out_of_order_queue)) {
if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tp->rx_opt.num_sacks = 0;
return;
}
@@ -4344,10 +4343,13 @@ static void tcp_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 dsack_high = tp->rcv_nxt;
bool fin, fragstolen, eaten;
struct sk_buff *skb, *tail;
bool fragstolen, eaten;
struct rb_node *p;

while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
p = rb_first(&tp->out_of_order_queue);
while (p) {
skb = rb_entry(p, struct sk_buff, rbnode);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;

@@ -4357,9 +4359,10 @@ static void tcp_ofo_queue(struct sock *sk)
dsack_high = TCP_SKB_CB(skb)->end_seq;
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
p = rb_next(p);
rb_erase(&skb->rbnode, &tp->out_of_order_queue);

__skb_unlink(skb, &tp->out_of_order_queue);
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
tcp_drop(sk, skb);
continue;
@@ -4371,12 +4374,19 @@ static void tcp_ofo_queue(struct sock *sk)
tail = skb_peek_tail(&sk->sk_receive_queue);
eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);
if (eaten)
else
kfree_skb_partial(skb, fragstolen);

if (unlikely(fin)) {
tcp_fin(sk);
/* tcp_fin() purges tp->out_of_order_queue,
* so we must end this loop right now.
*/
break;
}
}
}

@@ -4403,8 +4413,10 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct rb_node **p, *q, *parent;
struct sk_buff *skb1;
u32 seq, end_seq;
bool fragstolen;

tcp_ecn_check_ce(tp, skb);

@@ -4419,88 +4431,92 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
inet_csk_schedule_ack(sk);

NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
seq = TCP_SKB_CB(skb)->seq;
end_seq = TCP_SKB_CB(skb)->end_seq;
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
tp->rcv_nxt, seq, end_seq);

skb1 = skb_peek_tail(&tp->out_of_order_queue);
if (!skb1) {
p = &tp->out_of_order_queue.rb_node;
if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
/* Initial out of order segment, build 1 SACK. */
if (tcp_is_sack(tp)) {
tp->rx_opt.num_sacks = 1;
tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
tp->selective_acks[0].end_seq =
TCP_SKB_CB(skb)->end_seq;
tp->selective_acks[0].start_seq = seq;
tp->selective_acks[0].end_seq = end_seq;
}
__skb_queue_head(&tp->out_of_order_queue, skb);
rb_link_node(&skb->rbnode, NULL, p);
rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
tp->ooo_last_skb = skb;
goto end;
}

seq = TCP_SKB_CB(skb)->seq;
end_seq = TCP_SKB_CB(skb)->end_seq;

if (seq == TCP_SKB_CB(skb1)->end_seq) {
bool fragstolen;

if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
} else {
tcp_grow_window(sk, skb);
kfree_skb_partial(skb, fragstolen);
skb = NULL;
}

if (!tp->rx_opt.num_sacks ||
tp->selective_acks[0].end_seq != seq)
goto add_sack;

/* Common case: data arrive in order after hole. */
tp->selective_acks[0].end_seq = end_seq;
goto end;
/* In the typical case, we are adding an skb to the end of the list.
* Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
*/
if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
coalesce_done:
tcp_grow_window(sk, skb);
kfree_skb_partial(skb, fragstolen);
skb = NULL;
goto add_sack;
}
/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
parent = &tp->ooo_last_skb->rbnode;
p = &parent->rb_right;
goto insert;
}

/* Find place to insert this segment. */
while (1) {
if (!after(TCP_SKB_CB(skb1)->seq, seq))
break;
if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
skb1 = NULL;
break;
/* Find place to insert this segment. Handle overlaps on the way. */
parent = NULL;
while (*p) {
parent = *p;
skb1 = rb_entry(parent, struct sk_buff, rbnode);
if (before(seq, TCP_SKB_CB(skb1)->seq)) {
p = &parent->rb_left;
continue;
}
skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
/* All the bits are present. Drop. */
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPOFOMERGE);
__kfree_skb(skb);
skb = NULL;
tcp_dsack_set(sk, seq, end_seq);
goto add_sack;
}
if (after(seq, TCP_SKB_CB(skb1)->seq)) {
/* Partial overlap. */
tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
} else {
/* skb's seq == skb1's seq and skb covers skb1.
* Replace skb1 with skb.
*/
rb_replace_node(&skb1->rbnode, &skb->rbnode,
&tp->out_of_order_queue);
tcp_dsack_extend(sk,
TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPOFOMERGE);
__kfree_skb(skb1);
goto merge_right;
}
} else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
goto coalesce_done;
}
p = &parent->rb_right;
}
insert:
/* Insert segment into RB tree. */
rb_link_node(&skb->rbnode, parent, p);
rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);

/* Do skb overlap to previous one? */
if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
/* All the bits are present. Drop. */
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
tcp_drop(sk, skb);
skb = NULL;
tcp_dsack_set(sk, seq, end_seq);
goto add_sack;
}
if (after(seq, TCP_SKB_CB(skb1)->seq)) {
/* Partial overlap. */
tcp_dsack_set(sk, seq,
TCP_SKB_CB(skb1)->end_seq);
} else {
if (skb_queue_is_first(&tp->out_of_order_queue,
skb1))
skb1 = NULL;
else
skb1 = skb_queue_prev(
&tp->out_of_order_queue,
skb1);
}
}
if (!skb1)
__skb_queue_head(&tp->out_of_order_queue, skb);
else
__skb_queue_after(&tp->out_of_order_queue, skb1, skb);

/* And clean segments covered by new one as whole. */
while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
merge_right:
/* Remove other segments covered by skb. */
while ((q = rb_next(&skb->rbnode)) != NULL) {
skb1 = rb_entry(q, struct sk_buff, rbnode);

if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
@@ -4509,12 +4525,15 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
end_seq);
break;
}
__skb_unlink(skb1, &tp->out_of_order_queue);
rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
tcp_drop(sk, skb1);
}
/* If there is no skb after us, we are the last_skb ! */
if (!q)
tp->ooo_last_skb = skb;

add_sack:
if (tcp_is_sack(tp))
@@ -4651,13 +4670,13 @@ queue_and_out:
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);

if (!skb_queue_empty(&tp->out_of_order_queue)) {
if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);

/* RFC2581. 4.2. SHOULD send immediate ACK, when
* gap in queue is filled.
*/
if (skb_queue_empty(&tp->out_of_order_queue))
if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
inet_csk(sk)->icsk_ack.pingpong = 0;
}

@@ -4711,48 +4730,76 @@ drop:
tcp_data_queue_ofo(sk, skb);
}

static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
struct sk_buff_head *list)
static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next = NULL;
if (list)
return !skb_queue_is_last(list, skb) ? skb->next : NULL;

if (!skb_queue_is_last(list, skb))
next = skb_queue_next(list, skb);
return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
}

static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
struct sk_buff_head *list,
struct rb_root *root)
{
struct sk_buff *next = tcp_skb_next(skb, list);

if (list)
__skb_unlink(skb, list);
else
rb_erase(&skb->rbnode, root);

__skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);

return next;
}

/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct sk_buff *skb1;

while (*p) {
parent = *p;
skb1 = rb_entry(parent, struct sk_buff, rbnode);
if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
p = &parent->rb_left;
else
p = &parent->rb_right;
}
rb_link_node(&skb->rbnode, parent, p);
rb_insert_color(&skb->rbnode, root);
}

/* Collapse contiguous sequence of skbs head..tail with
* sequence numbers start..end.
*
* If tail is NULL, this means until the end of the list.
* If tail is NULL, this means until the end of the queue.
*
* Segments with FIN/SYN are not collapsed (only because this
* simplifies code)
*/
static void
tcp_collapse(struct sock *sk, struct sk_buff_head *list,
struct sk_buff *head, struct sk_buff *tail,
u32 start, u32 end)
tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
{
struct sk_buff *skb, *n;
struct sk_buff *skb = head, *n;
struct sk_buff_head tmp;
bool end_of_skbs;

/* First, check that queue is collapsible and find
* the point where collapsing can be useful. */
skb = head;
* the point where collapsing can be useful.
*/
restart:
end_of_skbs = true;
skb_queue_walk_from_safe(list, skb, n) {
if (skb == tail)
break;
for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
n = tcp_skb_next(skb, list);

/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list);
skb = tcp_collapse_one(sk, skb, list, root);
if (!skb)
break;
goto restart;
@@ -4770,13 +4817,10 @@ restart:
break;
}

if (!skb_queue_is_last(list, skb)) {
struct sk_buff *next = skb_queue_next(list, skb);
if (next != tail &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
end_of_skbs = false;
break;
}
if (n && n != tail &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
end_of_skbs = false;
break;
}

/* Decided to skip this, advance start seq. */
@@ -4786,17 +4830,22 @@ restart:
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;

__skb_queue_head_init(&tmp);

while (before(start, end)) {
int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
struct sk_buff *nskb;

nskb = alloc_skb(copy, GFP_ATOMIC);
if (!nskb)
return;
break;

memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
__skb_queue_before(list, skb, nskb);
if (list)
__skb_queue_before(list, skb, nskb);
else
__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
skb_set_owner_r(nskb, sk);

/* Copy data, releasing collapsed skbs. */
@@ -4814,14 +4863,17 @@ restart:
start += size;
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list);
skb = tcp_collapse_one(sk, skb, list, root);
if (!skb ||
skb == tail ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;
goto end;
}
}
}
end:
skb_queue_walk_safe(&tmp, skb, n)
tcp_rbtree_insert(root, skb);
}

/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
@@ -4830,43 +4882,43 @@ restart:
static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
struct sk_buff *head;
struct sk_buff *skb, *head;
struct rb_node *p;
u32 start, end;

if (!skb)
p = rb_first(&tp->out_of_order_queue);
skb = rb_entry_safe(p, struct sk_buff, rbnode);
new_range:
if (!skb) {
p = rb_last(&tp->out_of_order_queue);
/* Note: This is possible p is NULL here. We do not
* use rb_entry_safe(), as ooo_last_skb is valid only
* if rbtree is not empty.
*/
tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
return;

}
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
head = skb;

for (;;) {
struct sk_buff *next = NULL;
for (head = skb;;) {
skb = tcp_skb_next(skb, NULL);

if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
next = skb_queue_next(&tp->out_of_order_queue, skb);
skb = next;

/* Segment is terminated when we see gap or when
* we are at the end of all the queue. */
/* Range is terminated when we see a gap or when
* we are at the queue end.
*/
if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
tcp_collapse(sk, &tp->out_of_order_queue,
tcp_collapse(sk, NULL, &tp->out_of_order_queue,
head, skb, start, end);
head = skb;
if (!skb)
break;
/* Start new segment */
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
} else {
if (before(TCP_SKB_CB(skb)->seq, start))
start = TCP_SKB_CB(skb)->seq;
if (after(TCP_SKB_CB(skb)->end_seq, end))
end = TCP_SKB_CB(skb)->end_seq;
goto new_range;
}

if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
start = TCP_SKB_CB(skb)->seq;
if (after(TCP_SKB_CB(skb)->end_seq, end))
end = TCP_SKB_CB(skb)->end_seq;
}
}

@@ -4883,20 +4935,24 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
struct rb_node *node, *prev;

if (skb_queue_empty(&tp->out_of_order_queue))
if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
return false;

NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);

while ((skb = __skb_dequeue_tail(&tp->out_of_order_queue)) != NULL) {
tcp_drop(sk, skb);
node = &tp->ooo_last_skb->rbnode;
do {
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!tcp_under_memory_pressure(sk))
break;
}
node = prev;
} while (node);
tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);

/* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection
@@ -4930,7 +4986,7 @@ static int tcp_prune_queue(struct sock *sk)

tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
tcp_collapse(sk, &sk->sk_receive_queue,
tcp_collapse(sk, &sk->sk_receive_queue, NULL,
skb_peek(&sk->sk_receive_queue),
NULL,
tp->copied_seq, tp->rcv_nxt);
@@ -5035,7 +5091,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
(ofo_possible && skb_peek(&tp->out_of_order_queue))) {
(ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
@@ -5894,7 +5950,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
* so release it.
*/
if (req) {
tp->total_retrans = req->num_retrans;
inet_csk(sk)->icsk_retransmits = 0;
reqsk_fastopen_remove(sk, req, false);
} else {
/* Make sure socket is routed, for correct metrics. */
@@ -5936,7 +5992,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
} else
tcp_init_metrics(sk);

tcp_update_pacing_rate(sk);
if (!inet_csk(sk)->icsk_ca_ops->cong_control)
tcp_update_pacing_rate(sk);

/* Prevent spurious tcp_cwnd_restart() on first data packet */
tp->lsndtime = tcp_time_stamp;