Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net

Simple overlapping changes in the stmmac driver.

The skb_gro_flush_final_remcsum function signature is adjusted to match
the GRO list changes in net-next, as per Stephen Rothwell's example
merge resolution.

Signed-off-by: David S. Miller <davem@davemloft.net>
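For context, a rough sketch of what the adjusted helpers could look like
after this resolution. The fou.c, gre_offload.c and udp_offload.c hunks
below fold their open-coded flush/cleanup statements into these helpers;
everything not visible in those hunks (the PTR_ERR(pp) != -EINPROGRESS
guard, the struct gro_remcsum parameter type, and the plain
struct sk_buff *pp parameter implied by net-next converting ->gro_receive
to return struct sk_buff * rather than struct sk_buff **) is an
assumption here, not part of this diff:

	/* Sketch only: the -EINPROGRESS guard and the exact parameter
	 * types are assumed, not taken from this merge diff.
	 */
	static inline void skb_gro_flush_final(struct sk_buff *skb,
					       struct sk_buff *pp, int flush)
	{
		/* Only commit the flush decision once GRO is done with
		 * the skb (i.e. no callback returned -EINPROGRESS).
		 */
		if (PTR_ERR(pp) != -EINPROGRESS)
			NAPI_GRO_CB(skb)->flush |= flush;
	}

	static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
						       struct sk_buff *pp,
						       int flush,
						       struct gro_remcsum *grc)
	{
		/* Same as above, plus remote-checksum-offload cleanup. */
		if (PTR_ERR(pp) != -EINPROGRESS) {
			NAPI_GRO_CB(skb)->flush |= flush;
			skb_gro_remcsum_cleanup(skb, grc);
			skb->remcsum_offload = 0;
		}
	}

The fou.c hunk is consistent with this shape: the three statements it
removes reappear as the body of skb_gro_flush_final_remcsum().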
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
@@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = {
 	.socketpair	   = sock_no_socketpair,
 	.accept		   = inet_accept,
 	.getname	   = inet_getname,
-	.poll_mask	   = tcp_poll_mask,
+	.poll		   = tcp_poll,
 	.ioctl		   = inet_ioctl,
 	.listen		   = inet_listen,
 	.shutdown	   = inet_shutdown,
@@ -1021,7 +1021,7 @@ const struct proto_ops inet_dgram_ops = {
 	.socketpair	   = sock_no_socketpair,
 	.accept		   = sock_no_accept,
 	.getname	   = inet_getname,
-	.poll_mask	   = udp_poll_mask,
+	.poll		   = udp_poll,
 	.ioctl		   = inet_ioctl,
 	.listen		   = sock_no_listen,
 	.shutdown	   = inet_shutdown,
@@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(inet_dgram_ops);
 
 /*
  * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
- * udp_poll_mask
+ * udp_poll
  */
 static const struct proto_ops inet_sockraw_ops = {
 	.family		   = PF_INET,
@@ -1053,7 +1053,7 @@ static const struct proto_ops inet_sockraw_ops = {
 	.socketpair	   = sock_no_socketpair,
 	.accept		   = sock_no_accept,
 	.getname	   = inet_getname,
-	.poll_mask	   = datagram_poll_mask,
+	.poll		   = datagram_poll,
 	.ioctl		   = inet_ioctl,
 	.listen		   = sock_no_listen,
 	.shutdown	   = inet_shutdown,
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
@@ -448,9 +448,7 @@ next_proto:
 out_unlock:
 	rcu_read_unlock();
 out:
-	NAPI_GRO_CB(skb)->flush |= flush;
-	skb_gro_remcsum_cleanup(skb, &grc);
-	skb->remcsum_offload = 0;
+	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
 	return pp;
 }
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
@@ -223,7 +223,7 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
 out_unlock:
 	rcu_read_unlock();
 out:
-	NAPI_GRO_CB(skb)->flush |= flush;
+	skb_gro_flush_final(skb, pp, flush);
 
 	return pp;
 }
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
@@ -265,8 +265,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
 				       ipv4.sysctl_tcp_fastopen);
 	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
 	struct tcp_fastopen_context *ctxt;
-	int ret;
 	u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+	__le32 key[4];
+	int ret, i;
 
 	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
 	if (!tbl.data)
@@ -275,11 +276,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
 	rcu_read_lock();
 	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
 	if (ctxt)
-		memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+		memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
 	else
-		memset(user_key, 0, sizeof(user_key));
+		memset(key, 0, sizeof(key));
 	rcu_read_unlock();
 
+	for (i = 0; i < ARRAY_SIZE(key); i++)
+		user_key[i] = le32_to_cpu(key[i]);
+
 	snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
 		 user_key[0], user_key[1], user_key[2], user_key[3]);
 	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -290,13 +294,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
 			ret = -EINVAL;
 			goto bad_key;
 		}
-		tcp_fastopen_reset_cipher(net, NULL, user_key,
+
+		for (i = 0; i < ARRAY_SIZE(user_key); i++)
+			key[i] = cpu_to_le32(user_key[i]);
+
+		tcp_fastopen_reset_cipher(net, NULL, key,
 					  TCP_FASTOPEN_KEY_LENGTH);
 	}
 
 bad_key:
 	pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-	       user_key[0], user_key[1], user_key[2], user_key[3],
+		 user_key[0], user_key[1], user_key[2], user_key[3],
 	       (char *)tbl.data, ret);
 	kfree(tbl.data);
 	return ret;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ *	Wait for a TCP event.
+ *
+ *	Note that we don't need to lock the socket, as the upper poll layers
+ *	take care of normal races (between the test and the event) and we don't
+ *	go look at any of the socket buffers directly.
  */
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
+	__poll_t mask;
 	struct sock *sk = sock->sk;
 	const struct tcp_sock *tp = tcp_sk(sk);
-	__poll_t mask = 0;
 	int state;
 
+	sock_poll_wait(file, sk_sleep(sk), wait);
+
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
+	/* Socket is not locked. We are protected from async events
+	 * by poll logic and correct handling of state changes
+	 * made by other threads is impossible in any case.
+	 */
+
+	mask = 0;
+
 	/*
 	 * EPOLLHUP is certainly not done right. But poll() doesn't
 	 * have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 
 	return mask;
 }
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
@@ -266,7 +266,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 		 * it is probably a retransmit.
 		 */
 		if (tp->ecn_flags & TCP_ECN_SEEN)
-			tcp_enter_quickack_mode(sk, 1);
+			tcp_enter_quickack_mode(sk, 2);
 		break;
 	case INET_ECN_CE:
 		if (tcp_ca_needs_ecn(sk))
@@ -274,7 +274,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 
 		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
 			/* Better not delay acks, sender can have a very low cwnd */
-			tcp_enter_quickack_mode(sk, 1);
+			tcp_enter_quickack_mode(sk, 2);
 			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 		}
 		tp->ecn_flags |= TCP_ECN_SEEN;
@@ -3185,6 +3185,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 
 		if (tcp_is_reno(tp)) {
 			tcp_remove_reno_sacks(sk, pkts_acked);
+
+			/* If any of the cumulatively ACKed segments was
+			 * retransmitted, non-SACK case cannot confirm that
+			 * progress was due to original transmission due to
+			 * lack of TCPCB_SACKED_ACKED bits even if some of
+			 * the packets may have been never retransmitted.
+			 */
+			if (flag & FLAG_RETRANS_DATA_ACKED)
+				flag &= ~FLAG_ORIG_SACK_ACKED;
 		} else {
 			int delta;
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
@@ -2591,7 +2591,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  * 	udp_poll - wait for a UDP event.
  *	@file - file struct
  *	@sock - socket
- *	@events - events to wait for
+ *	@wait - poll table
  *
  *	This is same as datagram poll, except for the special case of
  *	blocking sockets. If application is using a blocking fd
@@ -2600,23 +2600,23 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *	but then block when reading it. Add special case code
  *	to work around these arguably broken applications.
  */
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
-	__poll_t mask = datagram_poll_mask(sock, events);
+	__poll_t mask = datagram_poll(file, sock, wait);
 	struct sock *sk = sock->sk;
 
 	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
 		mask |= EPOLLIN | EPOLLRDNORM;
 
 	/* Check for false positives due to checksum errors */
-	if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) &&
+	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
 	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
 		mask &= ~(EPOLLIN | EPOLLRDNORM);
 
 	return mask;
 
 }
-EXPORT_SYMBOL(udp_poll_mask);
+EXPORT_SYMBOL(udp_poll);
 
 int udp_abort(struct sock *sk, int err)
 {
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
@@ -395,7 +395,7 @@ unflush:
 out_unlock:
 	rcu_read_unlock();
 out:
-	NAPI_GRO_CB(skb)->flush |= flush;
+	skb_gro_flush_final(skb, pp, flush);
 	return pp;
 }
 EXPORT_SYMBOL(udp_gro_receive);