net: rename NET_{ADD|INC}_STATS_BH()
Rename NET_INC_STATS_BH() to __NET_INC_STATS()
and NET_ADD_STATS_BH() to __NET_ADD_STATS()

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Committed by David S. Miller

parent b15084ec7d
commit 02a1d6e7a6
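The dropped _BH suffix marked call sites that already run with bottom halves disabled (softirq context); the new leading double underscore follows the usual kernel convention that a __-prefixed helper leaves the context guarantee to the caller, while the plain-named variant does the extra work itself. The userspace sketch below only models that naming convention: local_bh_disable()/local_bh_enable(), the mibs[] array and NET_INC_STATS() here are stand-ins, not the kernel's real definitions (those live in include/net/ip.h and are not part of the hunks shown).

/*
 * Toy model of the naming convention, not the kernel's actual macros:
 * the __-prefixed variant assumes the caller already runs in a safe
 * (BH/softirq) context; the plain variant brackets the increment itself.
 */
#include <stdio.h>

enum { LINUX_MIB_TCPHPHITS, __LINUX_MIB_MAX };	/* stand-in MIB indices */

struct net_stats {
	unsigned long mibs[__LINUX_MIB_MAX];
};

static void local_bh_disable(void) { /* model only: mask softirqs */ }
static void local_bh_enable(void)  { /* model only: unmask softirqs */ }

/* Caller guarantees BH context: cheap, unguarded increment. */
#define __NET_INC_STATS(net, field)	((net)->mibs[field]++)

/* Caller may be in process context: take care of the guard here. */
#define NET_INC_STATS(net, field)		\
	do {					\
		local_bh_disable();		\
		__NET_INC_STATS(net, field);	\
		local_bh_enable();		\
	} while (0)

int main(void)
{
	struct net_stats ns = { { 0 } };

	__NET_INC_STATS(&ns, LINUX_MIB_TCPHPHITS);	/* e.g. from a softirq path */
	NET_INC_STATS(&ns, LINUX_MIB_TCPHPHITS);	/* e.g. from process context */
	printf("TCPHPHITS = %lu\n", ns.mibs[LINUX_MIB_TCPHPHITS]);
	return 0;
}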
@@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		else
 			mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = true;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = true;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
@@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
 		return false;
 	}
 
@@ -1313,7 +1313,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
 
 	return true;
 }
@@ -1469,7 +1469,7 @@ noop:
 	return skb;
 
 fallback:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
 	return NULL;
 }
 
@@ -1657,7 +1657,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			NET_INC_STATS_BH(sock_net(sk), mib_idx);
+			__NET_INC_STATS(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1909,7 +1909,7 @@ void tcp_enter_loss(struct sock *sk)
 	skb = tcp_write_queue_head(sk);
 	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
 	if (is_reneg) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -2395,7 +2395,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
 		/* Hold old state until something *above* high_seq
@@ -2417,7 +2417,7 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwnd_reduction(sk, false);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 		return true;
 	}
 	return false;
@@ -2432,10 +2432,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 		tcp_undo_cwnd_reduction(sk, true);
 
 		DBGUNDO(sk, "partial loss");
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		if (frto_undo)
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPSPURIOUSRTOS);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
 		if (frto_undo || tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2559,7 +2559,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2579,7 +2579,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2643,7 +2643,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 	else
 		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-	NET_INC_STATS_BH(sock_net(sk), mib_idx);
+	__NET_INC_STATS(sock_net(sk), mib_idx);
 
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
@@ -2736,7 +2736,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 
 		DBGUNDO(sk, "partial recovery");
 		tcp_undo_cwnd_reduction(sk, true);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
 		return true;
 	}
@@ -3431,7 +3431,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
 
 		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-			NET_INC_STATS_BH(net, mib_idx);
+			__NET_INC_STATS(net, mib_idx);
 			return true;	/* rate-limited: don't send yet! */
 		}
 	}
@@ -3464,7 +3464,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 		challenge_count = 0;
 	}
 	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
 }
@@ -3513,8 +3513,8 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 		tcp_end_cwnd_reduction(sk);
 		tcp_try_keep_open(sk);
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPLOSSPROBERECOVERY);
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPLOSSPROBERECOVERY);
 	} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
 			     FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
 		/* Pure dupack: original and TLP probe arrived; no loss */
@@ -3618,14 +3618,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
 
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		u32 ack_ev_flags = CA_ACK_SLOWPATH;
 
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -4128,7 +4128,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		else
 			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
@@ -4152,7 +4152,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4302,7 +4302,7 @@ static bool tcp_try_coalesce(struct sock *sk,
 
 	atomic_add(delta, &sk->sk_rmem_alloc);
 	sk_mem_charge(sk, delta);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
 	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
 	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
 	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
@@ -4390,7 +4390,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tcp_ecn_check_ce(tp, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		tcp_drop(sk, skb);
 		return;
 	}
@@ -4399,7 +4399,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tp->pred_flags = 0;
 	inet_csk_schedule_ack(sk);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4454,7 +4454,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 			tcp_drop(sk, skb);
 			skb = NULL;
 			tcp_dsack_set(sk, seq, end_seq);
@@ -4493,7 +4493,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		__skb_unlink(skb1, &tp->out_of_order_queue);
 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 		tcp_drop(sk, skb1);
 	}
 
@@ -4658,7 +4658,7 @@ queue_and_out:
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case. Force an immediate ack. */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
@@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
 	__skb_unlink(skb, list);
 	__kfree_skb(skb);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 
 	return next;
 }
@@ -4863,7 +4863,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 	bool res = false;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state. A conforming SACK implementation will
@@ -4892,7 +4892,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4922,7 +4922,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor. It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -5181,7 +5181,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
 						  LINUX_MIB_TCPACKSKIPPEDPAWS,
 						  &tp->last_oow_ack_time))
@@ -5234,7 +5234,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 syn_challenge:
 		if (syn_inerr)
 			__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
 		tcp_send_challenge_ack(sk, skb);
 		goto discard;
 	}
@@ -5377,7 +5377,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				__skb_pull(skb, tcp_header_len);
 				tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
+				__NET_INC_STATS(sock_net(sk),
+						LINUX_MIB_TCPHPHITSTOUSER);
 				eaten = 1;
 			}
 		}
@@ -5399,7 +5400,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			tcp_rcv_rtt_measure_ts(sk, skb);
 
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
 			eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
@@ -5549,12 +5550,14 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 			break;
 		}
 		tcp_rearm_rto(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		return true;
 	}
 	tp->syn_data_acked = tp->syn_data;
 	if (tp->syn_data_acked)
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVE);
 
 	tcp_fastopen_add_skb(sk, synack);
 
@@ -5589,7 +5592,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
 
@@ -5958,7 +5962,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
 
@@ -6015,7 +6019,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
@@ -6153,10 +6157,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
 	if (net->ipv4.sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
 		want_cookie = true;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 	} else
 #endif
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
 	if (!queue->synflood_warned &&
 	    net->ipv4.sysctl_tcp_syncookies != 2 &&
@@ -6217,7 +6221,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	 * timeout.
	 */
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
 
@@ -6264,7 +6268,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		if (dst && strict &&
 		    !tcp_peer_is_proven(req, dst, true,
 					tmp_opt.saw_tstamp)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 			goto drop_and_release;
 		}
 	}