Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Quoth David:

1) GRO MAC header comparisons were ethernet specific, breaking other
   link types.  This required a multi-faceted fix to cure the
   originally noted case (Infiniband), because IPoIB was lying about
   its actual hard header length.  Thanks to Eric Dumazet, Roland
   Dreier, and others.

2) Fix build failure when INET_UDP_DIAG is built in and ipv6 is
   modular.  From Anisse Astier.

3) Off-by-ones and other bug fixes in netprio_cgroup from Neil
   Horman.

4) ipv4 TCP reset generation needs to respect any network interface
   binding from the socket, otherwise route lookups might give a
   different result than all the other segments received.  From
   Shawn Lu.

5) Fix unintended regression in ipv4 proxy ARP responses, from
   Thomas Graf.

6) Fix SKB under-allocation bug in sh_eth, from Yoshihiro Shimoda.

7) Revert skge PCI mapping changes that are causing crashes for some
   folks, from Stephen Hemminger.

8) IPV4 route lookups fill in the wildcarded fields of the given flow
   lookup key passed in, which is fine most of the time as this is
   exactly what the callers want.  However there are a few cases that
   want to retain the original flow key values afterwards, so handle
   those cases properly.  Fix from Julian Anastasov.

9) IGB/IXGBE VF lookup bug fixes from Greg Rose.

10) Properly null terminate filename passed to ethtool flash device
    method, from Ben Hutchings.

11) S3 resume fix in via-velocity from David Lv.

12) Fix double SKB free during xmit failure in CAIF, from Dmitry
    Tarnyagin.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (72 commits)
  net: Don't proxy arp respond if iif == rt->dst.dev if private VLAN is disabled
  ipv4: Fix wrong order of ip_rt_get_source() and update iph->daddr.
  netprio_cgroup: fix wrong memory access when NETPRIO_CGROUP=m
  netprio_cgroup: don't allocate prio table when a device is registered
  netprio_cgroup: fix an off-by-one bug
  bna: fix error handling of bnad_get_flash_partition_by_offset()
  isdn: type bug in isdn_net_header()
  net: Make qdisc_skb_cb upper size bound explicit.
  ixgbe: ethtool: stats user buffer overrun
  ixgbe: dcb: up2tc mapping lost on disable/enable CEE DCB state
  ixgbe: do not update real num queues when netdev is going away
  ixgbe: Fix broken dependency on MAX_SKB_FRAGS being related to page size
  ixgbe: Fix case of Tx Hang in PF with 32 VFs
  ixgbe: fix vf lookup
  igb: fix vf lookup
  e1000: add dropped DMA receive enable back in for WoL
  gro: more generic L2 header check
  IPoIB: Stop lying about hard_header_len and use skb->cb to stash LL addresses
  zd1211rw: firmware needs duration_id set to zero for non-pspoll frames
  net: enable TC35815 for MIPS again
  ...
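Item 8 refers to ipv4 route lookups writing the resolved values (source
address, output interface) back into the caller's struct flowi4.  A
minimal sketch of the save/restore pattern involved — illustrative
variable names, not the actual fix, which lives in the ipv4 routing
code:

	struct flowi4 fl4 = {
		.daddr = daddr,	/* saddr and oif deliberately left wildcarded */
	};
	__be32 orig_saddr = fl4.saddr;
	int orig_oif = fl4.flowi4_oif;
	struct rtable *rt;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	/* The lookup has now filled in fl4.saddr and fl4.flowi4_oif; a
	 * caller that must match the original wildcarded key later has
	 * to restore the saved values. */
	fl4.saddr = orig_saddr;
	fl4.flowi4_oif = orig_oif;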
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -539,8 +539,10 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
 	memset(skb->cb, 0, sizeof(struct caif_payload_info));
 
-	if (cf_sk->layer.dn == NULL)
+	if (cf_sk->layer.dn == NULL) {
+		kfree_skb(skb);
 		return -EINVAL;
+	}
 
 	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
 }
@@ -683,10 +685,10 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		}
 		err = transmit_skb(skb, cf_sk,
 				msg->msg_flags&MSG_DONTWAIT, timeo);
-		if (err < 0) {
-			kfree_skb(skb);
+		if (err < 0)
+			/* skb is already freed */
 			goto pipe_err;
-		}
+
 		sent += size;
 	}
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -248,7 +248,6 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 {
 	struct cfmuxl *muxl = container_obj(layr);
 	struct cflayer *layer;
-	int idx;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
@@ -257,14 +256,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 
 		if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
 		     ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
-		     layer->id != 0) {
+		     layer->id != 0)
+			cfmuxl_remove_uplayer(layr, layer->id);
 
-			idx = layer->id % UP_CACHE_SIZE;
-			spin_lock_bh(&muxl->receive_lock);
-			RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
-			list_del_rcu(&layer->node);
-			spin_unlock_bh(&muxl->receive_lock);
-		}
 		/* NOTE: ctrlcmd is not allowed to block */
 		layer->ctrlcmd(layer, ctrl, phyid);
 	}
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3500,14 +3500,20 @@ static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	unsigned int maclen = skb->dev->hard_header_len;
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;
 
 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 		diffs |= p->vlan_tci ^ skb->vlan_tci;
-		diffs |= compare_ether_header(skb_mac_header(p),
-					      skb_gro_mac_header(skb));
+		if (maclen == ETH_HLEN)
+			diffs |= compare_ether_header(skb_mac_header(p),
+						      skb_gro_mac_header(skb));
+		else if (!diffs)
+			diffs = memcmp(skb_mac_header(p),
+				       skb_gro_mac_header(skb),
+				       maclen);
 		NAPI_GRO_CB(p)->same_flow = !diffs;
 		NAPI_GRO_CB(p)->flush = 0;
 	}
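The hunk above keeps compare_ether_header() as the fast path and only
falls back to a full-length memcmp() when nothing else already
distinguishes the flows (once diffs is non-zero the packets cannot
match anyway, so the longer comparison is skipped).  A hedged
restatement of the resulting rule as a standalone predicate —
illustrative names, not kernel API:

	/* Two held packets may share a GRO flow only if device, VLAN
	 * tag and the entire maclen-byte L2 header match; maclen ==
	 * ETH_HLEN (14) is the common Ethernet case, while link types
	 * such as IPoIB report a larger hard_header_len. */
	static bool gro_l2_match(const unsigned char *h1,
				 const unsigned char *h2,
				 unsigned int maclen)
	{
		return memcmp(h1, h2, maclen) == 0;
	}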
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1190,6 +1190,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
 	if (!dev->ethtool_ops->flash_device)
 		return -EOPNOTSUPP;
 
+	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+
 	return dev->ethtool_ops->flash_device(dev, &efl);
 }
 
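For context: efl is a struct ethtool_flash copied in from userspace a
few lines above this hunk, so nothing guarantees the filename is
NUL-terminated before it reaches the driver.  A hedged sketch of the
surrounding flow (the copy_from_user() call is paraphrased, not quoted
from the file):

	struct ethtool_flash efl;

	if (copy_from_user(&efl, useraddr, sizeof(efl)))
		return -EFAULT;
	/* data[] is a fixed-size filename buffer; forcing the last
	 * byte to NUL bounds any strlen()/strcpy() the driver's
	 * flash_device() hook performs on it. */
	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;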
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -58,11 +58,12 @@ static int get_prioidx(u32 *prio)
 
 	spin_lock_irqsave(&prioidx_map_lock, flags);
 	prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) {
+		spin_unlock_irqrestore(&prioidx_map_lock, flags);
+		return -ENOSPC;
+	}
 	set_bit(prioidx, prioidx_map);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
-	if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
-		return -ENOSPC;
-
 	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
@@ -107,7 +108,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
 static void update_netdev_tables(void)
 {
 	struct net_device *dev;
-	u32 max_len = atomic_read(&max_prioidx);
+	u32 max_len = atomic_read(&max_prioidx) + 1;
 	struct netprio_map *map;
 
 	rtnl_lock();
@@ -270,7 +271,6 @@ static int netprio_device_event(struct notifier_block *unused,
 {
 	struct net_device *dev = ptr;
 	struct netprio_map *old;
-	u32 max_len = atomic_read(&max_prioidx);
 
 	/*
 	 * Note this is called with rtnl_lock held so we have update side
@@ -278,11 +278,6 @@ static int netprio_device_event(struct notifier_block *unused,
 	 */
 
 	switch (event) {
-
-	case NETDEV_REGISTER:
-		if (max_len)
-			extend_netdev_table(dev, max_len);
-		break;
 	case NETDEV_UNREGISTER:
 		old = rtnl_dereference(dev->priomap);
 		RCU_INIT_POINTER(dev->priomap, NULL);
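The first two netprio hunks are two faces of the same off-by-one:
find_first_zero_bit() returns the bitmap size when every bit is set,
so that value must be rejected before set_bit() writes one past the
end of the map, and since prioidx is an index, a table covering
entries 0..max_prioidx needs max_prioidx + 1 slots.  A trivial
illustration in plain C, with made-up values:

	unsigned int max_prioidx = 7;			/* highest index handed out */
	unsigned int table_len = max_prioidx + 1;	/* slots for indices 0..7 */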
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1171,13 +1171,10 @@ EXPORT_SYMBOL(sock_update_classid);
 
 void sock_update_netprioidx(struct sock *sk)
 {
-	struct cgroup_netprio_state *state;
 	if (in_interrupt())
 		return;
-	rcu_read_lock();
-	state = task_netprio_state(current);
-	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
-	rcu_read_unlock();
+
+	sk->sk_cgrp_prioidx = task_netprioidx(current);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -409,7 +409,7 @@ config INET_TCP_DIAG
 
 config INET_UDP_DIAG
 	tristate "UDP: socket monitoring interface"
-	depends on INET_DIAG
+	depends on INET_DIAG && (IPV6 || IPV6=n)
 	default n
 	---help---
 	  Support for UDP socket monitoring interface used by the ss tool.
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb)
 		if (addr_type == RTN_UNICAST &&
 		    (arp_fwd_proxy(in_dev, dev, rt) ||
 		     arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
-		     pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+		     (rt->dst.dev != dev &&
+		      pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
 			n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 			if (n)
 				neigh_release(n);
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
 		}
 		if (srrptr + 3 <= srrspace) {
 			opt->is_changed = 1;
-			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			ip_hdr(skb)->daddr = opt->nexthop;
+			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			optptr[2] = srrptr+4;
 		} else if (net_ratelimit())
 			printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
 	struct ctl_table *table;
-	unsigned long limit;
 
 	table = ipv4_net_table;
 	if (!net_eq(net, &init_net)) {
@@ -815,11 +814,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 	net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
 	tcp_init_mem(net);
-	limit = nr_free_buffer_pages() / 8;
-	limit = max(limit, 128UL);
-	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
-	net->ipv4.sysctl_tcp_mem[1] = limit;
-	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
 
 	net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
 			net_ipv4_ctl_path, table);
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1876,6 +1876,20 @@ void tcp_shutdown(struct sock *sk, int how)
 }
 EXPORT_SYMBOL(tcp_shutdown);
 
+bool tcp_check_oom(struct sock *sk, int shift)
+{
+	bool too_many_orphans, out_of_socket_memory;
+
+	too_many_orphans = tcp_too_many_orphans(sk, shift);
+	out_of_socket_memory = tcp_out_of_memory(sk);
+
+	if (too_many_orphans && net_ratelimit())
+		pr_info("TCP: too many orphaned sockets\n");
+	if (out_of_socket_memory && net_ratelimit())
+		pr_info("TCP: out of memory -- consider tuning tcp_mem\n");
+	return too_many_orphans || out_of_socket_memory;
+}
+
 void tcp_close(struct sock *sk, long timeout)
 {
 	struct sk_buff *skb;
@@ -2015,10 +2029,7 @@ adjudge_to_death:
 	}
 	if (sk->sk_state != TCP_CLOSE) {
 		sk_mem_reclaim(sk);
-		if (tcp_too_many_orphans(sk, 0)) {
-			if (net_ratelimit())
-				printk(KERN_INFO "TCP: too many of orphaned "
-				       "sockets\n");
+		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 			NET_INC_STATS_BH(sock_net(sk),
@@ -3218,7 +3229,6 @@ __setup("thash_entries=", set_thash_entries);
 
 void tcp_init_mem(struct net *net)
 {
-	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	unsigned long limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
@@ -3287,7 +3297,8 @@ void __init tcp_init(void)
 	sysctl_max_syn_backlog = max(128, cnt / 256);
 
 	tcp_init_mem(&init_net);
-	limit = nr_free_buffer_pages() / 8;
+	/* Set per-socket limits to no more than 1/128 the pressure threshold */
+	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10);
 	limit = max(limit, 128UL);
 	max_share = min(4UL*1024*1024, limit);
 
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -651,6 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+	/* When socket is gone, all binding information is lost.
+	 * routing might fail in this case. using iif for oif to
+	 * make sure we can deliver it
+	 */
+	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
 
 	net = dev_net(skb_dst(skb)->dev);
 	arg.tos = ip_hdr(skb)->tos;
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 	if (sk->sk_err_soft)
 		shift++;
 
-	if (tcp_too_many_orphans(sk, shift)) {
-		if (net_ratelimit())
-			printk(KERN_INFO "Out of socket memory\n");
-
+	if (tcp_check_oom(sk, shift)) {
 		/* Catch exceptional cases, when connection requires reset.
 		 *      1. Last segment was sent recently. */
 		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -611,7 +611,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 						tid_agg_rx->buf_size;
 	if (!tid_agg_rx->reorder_buf[index] &&
-	    tid_agg_rx->stored_mpdu_num > 1) {
+	    tid_agg_rx->stored_mpdu_num) {
 		/*
 		 * No buffers ready to be released, but check whether any
 		 * frames in the reorder buffer have timed out.
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -148,8 +148,7 @@ struct choke_skb_cb {
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
 	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -130,8 +130,7 @@ struct netem_skb_cb {
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
 	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -94,8 +94,7 @@ struct sfb_skb_cb {
 
 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
+	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
 	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -166,9 +166,8 @@ struct sfq_skb_cb {
 
 static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) <
-		sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
-	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
+	qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
+	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
 }
 
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
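qdisc_cb_private_validate(), which all four sch_* hunks switch to, is
introduced elsewhere in this series ("net: Make qdisc_skb_cb upper
size bound explicit.") and is not shown in this diff.  It is
presumably a compile-time check along these lines — a sketch from
memory, to be verified against include/net/sch_generic.h:

	static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
	{
		struct qdisc_skb_cb *qcb;

		/* cb[] must hold the qdisc header up to data[] plus the
		 * qdisc's private area, and data[] itself must be big
		 * enough for that private area. */
		BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
		BUILD_BUG_ON(sizeof(qcb->data) < sz);
	}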