Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

net/ipv6/ip6_gre.c is a case of parallel adds.
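
For reference, a "parallel add" shows up at merge time as two versions of the same spot, each branch having added its own new block. The sketch below is purely schematic (hypothetical function names, not the actual ip6_gre.c contents) and only illustrates the shape of such a conflict, which is normally resolved by keeping both additions:

/* Schematic only, not the real ip6_gre.c hunks: both branches added new
 * code at the same location, so the merge shows both candidate blocks. */
<<<<<<< net-next
static void ip6gre_feature_added_in_net_next(void)
{
}
=======
static void ip6gre_fix_added_in_net(void)
{
}
>>>>>>> net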

include/trace/events/tcp.h is a little trickier. The removal of the in-trace-macro ifdefs in 'net' ran in parallel with the move of show_tcp_state_name and friends over to include/trace/events/sock.h in 'net-next'.
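
For context, show_tcp_state_name is a __print_symbolic() helper used in the trace events' TP_printk() output. The block below is an abbreviated, from-memory sketch (only a handful of states, not the exact kernel definition) of the kind of macro that 'net-next' moved into include/trace/events/sock.h so the sock and tcp trace events can share it:

/* Abbreviated sketch, not the exact kernel macro: map TCP state numbers
 * to their names for trace output via __print_symbolic(). */
#define show_tcp_state_name(val)			\
	__print_symbolic(val,				\
		{ TCP_ESTABLISHED, "TCP_ESTABLISHED" },	\
		{ TCP_SYN_SENT,    "TCP_SYN_SENT"    },	\
		{ TCP_SYN_RECV,    "TCP_SYN_RECV"    },	\
		{ TCP_FIN_WAIT1,   "TCP_FIN_WAIT1"   },	\
		{ TCP_CLOSE,       "TCP_CLOSE"       })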

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed on 2017-12-29 15:14:27 -05:00; 212 changed files with 2388 additions and 1214 deletions.

net/tipc/bearer.c

@@ -324,6 +324,7 @@ restart:
 	if (res) {
 		pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
 			name, -res);
+		kfree(b);
 		return -EINVAL;
 	}
@@ -347,8 +348,10 @@ restart:
 	if (skb)
 		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 
-	if (tipc_mon_create(net, bearer_id))
+	if (tipc_mon_create(net, bearer_id)) {
+		bearer_disable(net, b);
 		return -ENOMEM;
+	}
 
 	pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
 		name,
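
Taken together, the two hunks above tighten the error paths in tipc_enable_bearer(): a media enable failure now frees the bearer it just allocated, and a monitor creation failure also rolls back the media enable via bearer_disable(). A minimal, self-contained sketch of that unwind pattern (plain userspace C with made-up names, not the TIPC code) looks like this:

/* Illustration of the "undo exactly what has been done so far" error-path
 * pattern; names and return values are hypothetical. */
#include <errno.h>
#include <stdlib.h>

struct bearer { int media_enabled; };

static int enable_media(struct bearer *b) { b->media_enabled = 1; return 0; }
static int mon_create(struct bearer *b) { (void)b; return -ENOMEM; }
static void disable_bearer(struct bearer *b) { b->media_enabled = 0; free(b); }

static int enable_bearer(struct bearer **out)
{
	struct bearer *b = calloc(1, sizeof(*b));
	int err;

	if (!b)
		return -ENOMEM;

	err = enable_media(b);
	if (err) {
		free(b);		/* only the allocation has happened */
		return -EINVAL;
	}

	err = mon_create(b);
	if (err) {
		disable_bearer(b);	/* also rolls back enable_media() */
		return err;
	}

	*out = b;
	return 0;
}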

net/tipc/group.c

@@ -368,18 +368,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
 	u16 prev = grp->bc_snd_nxt - 1;
 	struct tipc_member *m;
 	struct rb_node *n;
+	u16 ackers = 0;
 
 	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
 		m = container_of(n, struct tipc_member, tree_node);
 		if (tipc_group_is_enabled(m)) {
 			tipc_group_update_member(m, len);
 			m->bc_acked = prev;
+			ackers++;
 		}
 	}
 
 	/* Mark number of acknowledges to expect, if any */
 	if (ack)
-		grp->bc_ackers = grp->member_cnt;
+		grp->bc_ackers = ackers;
 
 	grp->bc_snd_nxt++;
 }
@@ -848,17 +850,26 @@ void tipc_group_member_evt(struct tipc_group *grp,
 		*usr_wakeup = true;
 		m->usr_pending = false;
 		node_up = tipc_node_is_up(net, node);
+		m->event_msg = NULL;
 
-		/* Hold back event if more messages might be expected */
-		if (m->state != MBR_LEAVING && node_up) {
-			m->event_msg = skb;
-			tipc_group_decr_active(grp, m);
-			m->state = MBR_LEAVING;
-		} else {
-			if (node_up)
+		if (node_up) {
+			/* Hold back event if a LEAVE msg should be expected */
+			if (m->state != MBR_LEAVING) {
+				m->event_msg = skb;
+				tipc_group_decr_active(grp, m);
+				m->state = MBR_LEAVING;
+			} else {
 				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-			else
+				__skb_queue_tail(inputq, skb);
+			}
+		} else {
+			if (m->state != MBR_LEAVING) {
+				tipc_group_decr_active(grp, m);
+				m->state = MBR_LEAVING;
 				msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
+			} else {
+				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
+			}
 			__skb_queue_tail(inputq, skb);
 		}
 		list_del_init(&m->list);

net/tipc/monitor.c

@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
 {
 	struct tipc_net *tn = tipc_net(net);
 	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-	struct tipc_peer *self = get_self(net, bearer_id);
+	struct tipc_peer *self;
 	struct tipc_peer *peer, *tmp;
 
+	if (!mon)
+		return;
+
+	self = get_self(net, bearer_id);
 	write_lock_bh(&mon->lock);
 	tn->monitors[bearer_id] = NULL;
 	list_for_each_entry_safe(peer, tmp, &self->list, list) {
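
The hunk above makes the delete path safe to call for a bearer whose monitor was never (fully) created: the monitor pointer is checked before any lock is taken or any list is walked. A tiny stand-alone illustration of that guard (generic C, hypothetical names, not the TIPC code):

#include <stdlib.h>

struct monitor { int peers; };

/* A teardown helper must tolerate an instance that does not exist:
 * bail out before touching locks or lists that were never set up. */
static void mon_delete(struct monitor *mon)
{
	if (!mon)
		return;
	/* ... tear down peer state, then ... */
	free(mon);
}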

net/tipc/socket.c

@@ -727,11 +727,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
 	switch (sk->sk_state) {
 	case TIPC_ESTABLISHED:
+	case TIPC_CONNECTING:
 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
 			revents |= POLLOUT;
 		/* fall thru' */
 	case TIPC_LISTEN:
-	case TIPC_CONNECTING:
 		if (!skb_queue_empty(&sk->sk_receive_queue))
 			revents |= POLLIN | POLLRDNORM;
 		break;