Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1334,13 +1334,14 @@ static struct pernet_operations fib_net_ops = {
 
 void __init ip_fib_init(void)
 {
-	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+	fib_trie_init();
 
 	register_pernet_subsys(&fib_net_ops);
+
 	register_netdevice_notifier(&fib_netdev_notifier);
 	register_inetaddr_notifier(&fib_inetaddr_notifier);
 
-	fib_trie_init();
+	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
 }
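
The reorder matters because register_netdevice_notifier() replays NETDEV_REGISTER/NETDEV_UP events for devices that already exist, so fib_netdev_notifier can run the moment it is registered and touch a not-yet-allocated trie; moving fib_trie_init() first closes that window, and deferring the rtnl_register() calls keeps netlink requests from racing a half-initialized FIB. A minimal userspace sketch of the same hazard shape (illustrative names only, not the kernel API):

/* Illustrative userspace sketch, not kernel code: a callback registered
 * before its backing state exists can fire on replayed events and
 * dereference a NULL table -- the bug shape the reorder above avoids.
 */
#include <stdio.h>
#include <stdlib.h>

struct trie { int n_routes; };
static struct trie *fib_table;          /* allocated by trie_init() */

static void trie_init(void) { fib_table = calloc(1, sizeof(*fib_table)); }

/* Stand-in for a netdevice notifier: invoked immediately at registration
 * time for devices that already exist. */
static void on_device_event(void) { fib_table->n_routes++; }

static void register_notifier(void (*cb)(void))
{
        cb();                           /* replay events for existing devices */
}

int main(void)
{
        trie_init();                        /* must run first ... */
        register_notifier(on_device_event); /* ... before this can fire */
        printf("routes: %d\n", fib_table->n_routes);
        return 0;
}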
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -599,6 +599,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	hlen = iph->ihl * 4;
 	mtu = mtu - hlen;	/* Size of data space */
 	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
+	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
 
 	/* When frag_list is given, use it. First, check its validity:
 	 * some transformers could create wrong frag_list or break existing
@@ -614,14 +615,15 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		if (first_len - hlen > mtu ||
 		    ((first_len - hlen) & 7) ||
 		    ip_is_fragment(iph) ||
-		    skb_cloned(skb))
+		    skb_cloned(skb) ||
+		    skb_headroom(skb) < ll_rs)
 			goto slow_path;
 
 		skb_walk_frags(skb, frag) {
 			/* Correct geometry. */
 			if (frag->len > mtu ||
 			    ((frag->len & 7) && frag->next) ||
-			    skb_headroom(frag) < hlen)
+			    skb_headroom(frag) < hlen + ll_rs)
 				goto slow_path_clean;
 
 			/* Partially cloned skb? */
@@ -711,8 +713,6 @@ slow_path:
 	left = skb->len - hlen;		/* Space per frame */
 	ptr = hlen;		/* Where to start from */
 
-	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
-
 	/*
 	 *	Fragment the datagram.
 	 */
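
The new tests matter because the frag_list fast path hands each fragment straight down to the link layer, reusing whatever headroom the skb already has; every fragment must therefore have room for the device's link-layer header up front, where previously only the slow path (which allocates fresh skbs) accounted for it. A worked sketch of the arithmetic (the HH_DATA_MOD round-up mirrors LL_RESERVED_SPACE() for a device with no extra needed_headroom; Ethernet and an option-less IPv4 header are assumed):

/* Worked example of the headroom requirement (userspace sketch). */
#include <stdio.h>

#define HH_DATA_MOD 16  /* link-layer headers are aligned to 16 bytes */

/* Round the link-layer header length up to a HH_DATA_MOD boundary. */
static unsigned int ll_reserved_space(unsigned int hard_header_len)
{
        return (hard_header_len & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD;
}

int main(void)
{
        unsigned int eth_hlen = 14;     /* Ethernet header */
        unsigned int iph_len  = 20;     /* IPv4 header, no options */
        unsigned int ll_rs    = ll_reserved_space(eth_hlen);  /* -> 16 */

        /* The head skb already carries its IP header, so it only needs
         * ll_rs bytes; each skb on the frag_list must additionally fit a
         * rebuilt IP header: hlen + ll_rs.
         */
        printf("head skb needs %u bytes, each frag needs %u bytes\n",
               ll_rs, iph_len + ll_rs);
        return 0;
}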
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -72,8 +72,7 @@ static const struct nf_chain_type filter_arp = {
 	.family = NFPROTO_ARP,
 	.owner = THIS_MODULE,
 	.hook_mask = (1 << NF_ARP_IN) |
-		     (1 << NF_ARP_OUT) |
-		     (1 << NF_ARP_FORWARD),
+		     (1 << NF_ARP_OUT),
 };
 
 static int __init nf_tables_arp_init(void)
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2979,8 +2979,7 @@ static __net_init int rt_genid_init(struct net *net)
 {
 	atomic_set(&net->ipv4.rt_genid, 0);
 	atomic_set(&net->fnhe_genid, 0);
-	get_random_bytes(&net->ipv4.dev_addr_genid,
-			 sizeof(net->ipv4.dev_addr_genid));
+	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
 	return 0;
 }
 
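
net->ipv4.dev_addr_genid is an atomic_t, so seeding it with get_random_bytes() wrote through its raw storage rather than the atomic API; atomic_set() of a get_random_int() value keeps the randomized seed while staying type-correct. A small C11 analogue of the pattern (userspace sketch, not kernel code):

/* Seed a generation counter through its atomic API instead of
 * overwriting its raw storage with random bytes.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static atomic_int dev_addr_genid;

int main(void)
{
        srand((unsigned)time(NULL));
        /* like atomic_set(&genid, get_random_int()) */
        atomic_store(&dev_addr_genid, rand());

        /* bumped on every address change elsewhere in the code */
        atomic_fetch_add(&dev_addr_genid, 1);
        printf("genid = %d\n", atomic_load(&dev_addr_genid));
        return 0;
}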
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -335,6 +335,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	treq->ts_off = 0;
+	treq->txhash = net_tx_rndhash();
 	req->mss = mss;
 	ireq->ir_num = ntohs(th->dest);
 	ireq->ir_rmt_port = th->source;
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -112,7 +112,8 @@ struct bbr {
 		cwnd_gain:10,	/* current gain for setting cwnd */
 		full_bw_cnt:3,	/* number of rounds without large bw gains */
 		cycle_idx:3,	/* current index in pacing_gain cycle array */
-		unused_b:6;
+		has_seen_rtt:1, /* have we seen an RTT sample yet? */
+		unused_b:5;
 	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
 	u32	full_bw;	/* recent bw, to estimate if pipe is full */
 };
@@ -211,6 +212,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
 	return rate >> BW_SCALE;
 }
 
+/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
+static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+	u64 rate = bw;
+
+	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+	return rate;
+}
+
+/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct bbr *bbr = inet_csk_ca(sk);
+	u64 bw;
+	u32 rtt_us;
+
+	if (tp->srtt_us) {		/* any RTT sample yet? */
+		rtt_us = max(tp->srtt_us >> 3, 1U);
+		bbr->has_seen_rtt = 1;
+	} else {			/* no RTT sample yet */
+		rtt_us = USEC_PER_MSEC;	/* use nominal default RTT */
+	}
+	bw = (u64)tp->snd_cwnd * BW_UNIT;
+	do_div(bw, rtt_us);
+	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+}
+
 /* Pace using current bw estimate and a gain factor. In order to help drive the
  * network toward lower queues while maintaining high utilization and low
  * latency, the average pacing rate aims to be slightly (~1%) lower than the
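
To put numbers on the fixed-point conversion: with BW_SCALE = 24, BBR_SCALE = 8 and bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1 (~2.89x) as in tcp_bbr.c, an assumed 10-packet initial cwnd, 1500-byte wire MTU and the nominal 1 ms default RTT yield an initial pacing rate near 43 MB/s, i.e. roughly 2.89 * 10 * 1500 B / 1 ms. A self-contained sketch of the same arithmetic:

/* Userspace replay of the bbr_init_pacing_rate_from_rtt() /
 * bbr_rate_bytes_per_sec() fixed-point math; cwnd, MTU and RTT below
 * are assumed example inputs, constants are from tcp_bbr.c.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SCALE      24
#define BW_UNIT       (1ULL << BW_SCALE)  /* pkts/us scaled by 2^24 */
#define BBR_SCALE     8
#define BBR_UNIT      (1 << BBR_SCALE)
#define USEC_PER_SEC  1000000ULL
#define USEC_PER_MSEC 1000ULL

static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;  /* ~2.89x */

int main(void)
{
        uint64_t cwnd   = 10;            /* initial cwnd, packets */
        uint64_t mtu    = 1500;          /* bytes per packet on the wire */
        uint64_t rtt_us = USEC_PER_MSEC; /* nominal default RTT: 1 ms */

        /* bw in packets/us, scaled by BW_UNIT */
        uint64_t bw = cwnd * BW_UNIT / rtt_us;

        /* scale by packet size and gain, then descale to bytes/sec */
        uint64_t rate = bw * mtu;
        rate *= (uint64_t)bbr_high_gain;
        rate >>= BBR_SCALE;
        rate *= USEC_PER_SEC;
        rate >>= BW_SCALE;

        printf("initial pacing rate: %llu bytes/sec\n",  /* ~43 MB/s */
               (unsigned long long)rate);
        return 0;
}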
@@ -220,12 +250,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
  */
 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 rate = bw;
+	u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
 
-	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
-	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
-	if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
+		bbr_init_pacing_rate_from_rtt(sk);
+	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
 		sk->sk_pacing_rate = rate;
 }
 
@@ -798,7 +829,6 @@ static void bbr_init(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 bw;
 
 	bbr->prior_cwnd = 0;
 	bbr->tso_segs_goal = 0;	 /* default segs per skb until first ACK */
@@ -814,11 +844,8 @@ static void bbr_init(struct sock *sk)
 
 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
 
-	/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
-	bw = (u64)tp->snd_cwnd * BW_UNIT;
-	do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
-	sk->sk_pacing_rate = 0;		/* force an update of sk_pacing_rate */
-	bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+	bbr->has_seen_rtt = 0;
+	bbr_init_pacing_rate_from_rtt(sk);
 
 	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1388,6 +1388,11 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
 		unlock_sock_fast(sk, slow);
 	}
 
+	/* we cleared the head states previously only if the skb lacks any IP
+	 * options, see __udp_queue_rcv_skb().
+	 */
+	if (unlikely(IPCB(skb)->opt.optlen > 0))
+		skb_release_head_state(skb);
 	consume_stateless_skb(skb);
 }
 EXPORT_SYMBOL_GPL(skb_consume_udp);
@@ -1779,8 +1784,12 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		sk_mark_napi_id_once(sk, skb);
 	}
 
-	/* clear all pending head states while they are hot in the cache */
-	skb_release_head_state(skb);
+	/* At recvmsg() time we need skb->dst to process IP options-related
+	 * cmsg, elsewhere can we clear all pending head states while they are
+	 * hot in the cache
+	 */
+	if (likely(IPCB(skb)->opt.optlen == 0))
+		skb_release_head_state(skb);
 
 	rc = __udp_enqueue_schedule_skb(sk, skb);
 	if (rc < 0) {
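
The skb->dst now has to survive into recvmsg(), because that is where IP-option ancillary data is generated for the receiver. A userspace sketch of the receive side this preserves, using the standard IP_RECVOPTS control message (port number is an assumed example; error handling omitted):

/* Ask for incoming IP options as ancillary data -- the recvmsg()-time
 * processing the patch keeps head state alive for.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int on = 1;
        setsockopt(fd, IPPROTO_IP, IP_RECVOPTS, &on, sizeof(on));

        struct sockaddr_in addr = { .sin_family = AF_INET,
                                    .sin_port = htons(9000), /* example port */
                                    .sin_addr.s_addr = htonl(INADDR_ANY) };
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));

        char buf[2048], cbuf[256];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = cbuf,
                              .msg_controllen = sizeof(cbuf) };

        ssize_t n = recvmsg(fd, &msg, 0);
        for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
             c = CMSG_NXTHDR(&msg, c))
                if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_RECVOPTS)
                        printf("%zu bytes of IP options on a %zd-byte datagram\n",
                               (size_t)(c->cmsg_len - CMSG_LEN(0)), n);
        return 0;
}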