Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Overlapping changes in selftests Makefile.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -523,8 +523,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
		return -ELOOP;
	}

	/* Device is already being bridged */
	if (br_port_exists(dev))
	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
@@ -2569,6 +2569,11 @@ static int try_write(struct ceph_connection *con)
	int ret = 1;

	dout("try_write start %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_PREOPEN &&
	    con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2594,6 +2599,8 @@ more:
	}

more_kvec:
	BUG_ON(!con->sock);

	/* kvec data queued? */
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
@@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc)
	__open_session(monc);
}

static void un_backoff(struct ceph_mon_client *monc)
{
	monc->hunt_mult /= 2; /* reduce by 50% */
	if (monc->hunt_mult < 1)
		monc->hunt_mult = 1;
	dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
}

/*
 * Reschedule delayed work timer.
 */
@@ -963,6 +971,7 @@ static void delayed_work(struct work_struct *work)
	if (!monc->hunting) {
		ceph_con_keepalive(&monc->con);
		__validate_auth(monc);
		un_backoff(monc);
	}

	if (is_auth &&
@@ -1123,9 +1132,8 @@ static void finish_hunting(struct ceph_mon_client *monc)
		dout("%s found mon%d\n", __func__, monc->cur_mon);
		monc->hunting = false;
		monc->had_a_connection = true;
		monc->hunt_mult /= 2; /* reduce by 50% */
		if (monc->hunt_mult < 1)
			monc->hunt_mult = 1;
		un_backoff(monc);
		__schedule_delayed(monc);
	}
}
@@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
	    optname == SO_ATTACH_REUSEPORT_CBPF)
		return do_set_attach_filter(sock, level, optname,
					    optval, optlen);
	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
	if (!COMPAT_USE_64BIT_TIME &&
	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
		return do_set_sock_timeout(sock, level, optname, optval, optlen);

	return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -448,7 +449,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
	if (!COMPAT_USE_64BIT_TIME &&
	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
		return do_get_sock_timeout(sock, level, optname, optval, optlen);
	return sock_getsockopt(sock, level, optname, optval, optlen);
}
@@ -1007,6 +1007,11 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
		info_size = sizeof(info);
		if (copy_from_user(&info, useraddr, info_size))
			return -EFAULT;
		/* Since malicious users may modify the original data,
		 * we need to check whether FLOW_RSS is still requested.
		 */
		if (!(info.flow_type & FLOW_RSS))
			return -EINVAL;
	}

	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
						  DCCPF_SEQ_WMAX));
}

static void dccp_tasklet_schedule(struct sock *sk)
{
	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;

	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		sock_hold(sk);
		__tasklet_schedule(t);
	}
}

static void ccid2_hc_tx_rto_expire(struct timer_list *t)
{
	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)

	/* if we were blocked before, we may now send cwnd=1 packet */
	if (sender_was_blocked)
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
		dccp_tasklet_schedule(sk);
	/* restart backed-off timer */
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
done:
	/* check if incoming Acks allow pending packets to be sent */
	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
		dccp_tasklet_schedule(sk);
	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
@@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data)
	else
		dccp_write_xmit(sk);
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void dccp_write_xmit_timer(struct timer_list *t)
@@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t)
	struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;

	dccp_write_xmitlet((unsigned long)sk);
	sock_put(sk);
}

void dccp_init_xmit_timers(struct sock *sk)
net/ipv4/route.c
@@ -709,7 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = expires;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
@@ -1297,6 +1297,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nh->nh_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
@@ -1310,8 +1340,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nh, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
@@ -1636,36 +1672,6 @@ static void ip_handle_martian_source(struct net_device *dev,
#endif
}

static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nh->nh_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
@@ -1719,20 +1725,10 @@ static int __mkroute_input(struct sk_buff *skb,

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		if (fnhe) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
			if (rth && rth->dst.expires &&
			    time_after(jiffies, rth->dst.expires)) {
				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
				fnhe = NULL;
			} else {
				goto rt_cache;
			}
		}

		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);

rt_cache:
		else
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
@@ -2216,39 +2212,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		fi = NULL;
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (do_cache) {
	if (fi) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		fnhe = find_exception(nh, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
			rth = rcu_dereference(*prth);
			if (rth && rth->dst.expires &&
			    time_after(jiffies, rth->dst.expires)) {
				ip_del_fnhe(nh, fl4->daddr);
				fnhe = NULL;
			} else {
				goto rt_cache;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nh->nh_gw &&
				       nh->nh_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
		}

		if (unlikely(fl4->flowi4_flags &
			     FLOWI_FLAG_KNOWN_NH &&
			     !(nh->nh_gw &&
			       nh->nh_scope == RT_SCOPE_LINK))) {
			do_cache = false;
			goto add;
		}
		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
		rth = rcu_dereference(*prth);

rt_cache:
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}
@@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
{
	return skb->len < size_goal &&
	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
	       skb != tcp_write_queue_head(sk) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
}

@@ -1204,7 +1204,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
		uarg->zerocopy = 0;
	}

	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
	    !tp->repair) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
@@ -2833,7 +2834,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
	case TCP_REPAIR_QUEUE:
		if (!tp->repair)
			err = -EPERM;
		else if (val < TCP_QUEUES_NR)
		else if ((unsigned int)val < TCP_QUEUES_NR)
			tp->repair_queue = val;
		else
			err = -EINVAL;
@@ -806,7 +806,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
			}
		}
	}
	bbr->idle_restart = 0;
	/* Restart after idle ends only once we process a new S/ACK for data */
	if (rs->delivered > 0)
		bbr->idle_restart = 0;
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
@@ -1932,11 +1932,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = icmp6_hdr(skb);
	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
@@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
		struct rds_cmsg_rx_trace t;
		int i, j;

		memset(&t, 0, sizeof(t));
		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
		t.rx_traces = rs->rs_rx_traces;
		for (i = 0; i < rs->rs_rx_traces; i++) {
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
	return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(&q->old_flows, f);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)

static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
			     f->socket_hash != sk->sk_hash)) {
			f->credit = q->initial_quantum;
			f->socket_hash = sk->sk_hash;
			if (fq_flow_is_throttled(f))
				fq_flow_unset_throttled(q, f);
			f->time_next_packet = 0ULL;
		}
		return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		rb_erase(p, &q->delayed);
		q->throttled_flows--;
		fq_flow_add_tail(&q->old_flows, f);
		fq_flow_unset_throttled(q, f);
	}
}
@@ -217,7 +217,7 @@ new_skb:
	skb_pull(chunk->skb, sizeof(*ch));
	chunk->subh.v = NULL; /* Subheader is no longer valid. */

	if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) {
	if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
		/* This is not a singleton */
		chunk->singleton = 0;
	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
@@ -895,6 +895,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
		return 1;

	if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
		return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;

	return __sctp_v6_cmp_addr(addr1, addr2);
}
@@ -1794,6 +1794,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
			       GFP_ATOMIC))
		goto nomem;

	if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
		goto nomem;

	/* Make sure no new addresses are being added during the
	 * restart. Though this is a pretty complicated attack
	 * since you'd have to get inside the cookie.
@@ -1906,6 +1909,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
			       GFP_ATOMIC))
		goto nomem;

	if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
		goto nomem;

	/* Update the content of current association. */
	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -2050,7 +2056,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
		}
	}

	repl = sctp_make_cookie_ack(new_asoc, chunk);
	repl = sctp_make_cookie_ack(asoc, chunk);
	if (!repl)
		goto nomem;
@@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)

	new->out = NULL;
	new->in = NULL;
	new->outcnt = 0;
	new->incnt = 0;
}

static int sctp_send_reconf(struct sctp_association *asoc,
@@ -293,6 +293,17 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register a new rmb */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
{
	/* register memory region for new rmb */
	if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}

static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
@@ -323,9 +334,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)

	smc_wr_remember_qp_attr(link);

	rc = smc_wr_reg_send(link,
			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
	if (rc)
	if (smc_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK response over RoCE fabric */
@@ -478,13 +487,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
			goto decline_rdma_unlock;
		}
	} else {
		struct smc_buf_desc *buf_desc = smc->conn.rmb_desc;

		if (!buf_desc->reused) {
			/* register memory region for new rmb */
			rc = smc_wr_reg_send(link,
					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
			if (rc) {
		if (!smc->conn.rmb_desc->reused) {
			if (smc_reg_rmb(link, smc->conn.rmb_desc)) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
@@ -725,9 +729,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)

	link = &lgr->lnk[SMC_SINGLE_LINK];

	rc = smc_wr_reg_send(link,
			     smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
	if (rc)
	if (smc_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK request to client over the RoCE fabric */
@@ -863,13 +865,8 @@ static void smc_listen_work(struct work_struct *work)
	smc_rx_init(new_smc);

	if (local_contact != SMC_FIRST_CONTACT) {
		struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc;

		if (!buf_desc->reused) {
			/* register memory region for new rmb */
			rc = smc_wr_reg_send(link,
					     buf_desc->mr_rx[SMC_SINGLE_LINK]);
			if (rc) {
		if (!new_smc->conn.rmb_desc->reused) {
			if (smc_reg_rmb(link, new_smc->conn.rmb_desc)) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
@@ -1207,13 +1204,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
		/* delegate to CLC child sock */
		release_sock(sk);
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		/* if non-blocking connect finished ... */
		lock_sock(sk);
		if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
			sk->sk_err = smc->clcsock->sk->sk_err;
			if (sk->sk_err) {
				mask |= EPOLLERR;
			} else {
		sk->sk_err = smc->clcsock->sk->sk_err;
		if (sk->sk_err) {
			mask |= EPOLLERR;
		} else {
			/* if non-blocking connect finished ... */
			if (sk->sk_state == SMC_INIT &&
			    mask & EPOLLOUT &&
			    smc->clcsock->sk->sk_state != TCP_CLOSE) {
				rc = smc_connect_rdma(smc);
				if (rc < 0)
					mask |= EPOLLERR;
@@ -1433,8 +1432,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE)
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
@@ -1442,7 +1444,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	release_sock(sk);
	return rc;
}
@@ -32,6 +32,9 @@

static u32 smc_lgr_num; /* unique link group number */

static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
			 bool is_rmb);

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
@@ -234,9 +237,22 @@ static void smc_buf_unuse(struct smc_connection *conn)
		conn->sndbuf_size = 0;
	}
	if (conn->rmb_desc) {
		conn->rmb_desc->reused = true;
		conn->rmb_desc->used = 0;
		conn->rmbe_size = 0;
		if (!conn->rmb_desc->regerr) {
			conn->rmb_desc->reused = 1;
			conn->rmb_desc->used = 0;
			conn->rmbe_size = 0;
		} else {
			/* buf registration failed, reuse not possible */
			struct smc_link_group *lgr = conn->lgr;
			struct smc_link *lnk;

			write_lock_bh(&lgr->rmbs_lock);
			list_del(&conn->rmb_desc->list);
			write_unlock_bh(&lgr->rmbs_lock);

			lnk = &lgr->lnk[SMC_SINGLE_LINK];
			smc_buf_free(conn->rmb_desc, lnk, true);
		}
	}
}
@@ -127,7 +127,8 @@ struct smc_buf_desc {
			 */
	u32 order;	/* allocation order */
	u32 used;	/* currently used / unused */
	bool reused;	/* new created / reused */
	u8 reused : 1;	/* new created / reused */
	u8 regerr : 1;	/* err during registration */
};

struct smc_rtoken {	/* address/key of remote RMB */
@@ -2271,7 +2271,7 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
@@ -114,6 +114,7 @@ int tls_push_sg(struct sock *sk,
	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;
@@ -148,6 +149,8 @@ retry:
	}

	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}
@@ -217,6 +220,10 @@ static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* We are already sending pages, ignore notification */
	if (ctx->in_tcp_sendpages)
		return;

	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;
		int rc;