Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:
 "Some merge window fallout, some longer term fixes:

   1) Handle headroom properly in lapbether and x25_asy drivers, from
      Xie He.

   2) Fetch MAC address from correct r8152 device node, from Thierry
      Reding.

   3) In the sw kTLS path we should allow MSG_CMSG_COMPAT in sendmsg,
      from Rouven Czerwinski.

   4) Correct fdputs in socket layer, from Miaohe Lin.

   5) Revert troublesome sockptr_t optimization, from Christoph Hellwig.

   6) Fix TCP TFO key reading on big endian, from Jason Baron.

   7) Missing CAP_NET_RAW check in nfc, from Qingyu Li.

   8) Fix inet fastreuse optimization with tproxy sockets, from Tim
      Froidcoeur.

   9) Fix 64-bit divide in new SFC driver, from Edward Cree.

  10) Add a tracepoint for prandom_u32 so that we can more easily
      perform usage analysis. From Eric Dumazet.

  11) Fix rwlock imbalance in AF_PACKET, from John Ogness"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (49 commits)
  net: openvswitch: introduce common code for flushing flows
  af_packet: TPACKET_V3: fix fill status rwlock imbalance
  random32: add a tracepoint for prandom_u32()
  Revert "ipv4: tunnel: fix compilation on ARCH=um"
  net: accept an empty mask in /sys/class/net/*/queues/rx-*/rps_cpus
  net: ethernet: stmmac: Disable hardware multicast filter
  net: stmmac: dwmac1000: provide multicast filter fallback
  ipv4: tunnel: fix compilation on ARCH=um
  vsock: fix potential null pointer dereference in vsock_poll()
  sfc: fix ef100 design-param checking
  net: initialize fastreuse on inet_inherit_port
  net: refactor bind_bucket fastreuse into helper
  net: phy: marvell10g: fix null pointer dereference
  net: Fix potential memory leak in proto_register()
  net: qcom/emac: add missed clk_disable_unprepare in error path of emac_clks_phase1_init
  ionic_lif: Use devm_kcalloc() in ionic_qcq_alloc()
  net/nfc/rawsock.c: add CAP_NET_RAW check.
  hinic: fix strncpy output truncated compile warnings
  drivers/net/wan/x25_asy: Added needed_headroom and a skb->len check
  net/tls: Fix kmap usage
  ...
@@ -1384,18 +1384,39 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
 	return 0;
 }
 
-static int bpf_iter_check_map(struct bpf_prog *prog,
-			      struct bpf_iter_aux_info *aux)
+static int bpf_iter_attach_map(struct bpf_prog *prog,
+			       union bpf_iter_link_info *linfo,
+			       struct bpf_iter_aux_info *aux)
 {
-	struct bpf_map *map = aux->map;
+	struct bpf_map *map;
+	int err = -EINVAL;
+
+	if (!linfo->map.map_fd)
+		return -EBADF;
+
+	map = bpf_map_get_with_uref(linfo->map.map_fd);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
 
 	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
-		return -EINVAL;
+		goto put_map;
 
-	if (prog->aux->max_rdonly_access > map->value_size)
-		return -EACCES;
+	if (prog->aux->max_rdonly_access > map->value_size) {
+		err = -EACCES;
+		goto put_map;
+	}
 
+	aux->map = map;
 	return 0;
+
+put_map:
+	bpf_map_put_with_uref(map);
+	return err;
+}
+
+static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
+{
+	bpf_map_put_with_uref(aux->map);
 }
 
 static const struct seq_operations bpf_sk_storage_map_seq_ops = {

@@ -1414,8 +1435,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 
 static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
 	.target			= "bpf_sk_storage_map",
-	.check_target		= bpf_iter_check_map,
-	.req_linfo		= BPF_ITER_LINK_MAP_FD,
+	.attach_target		= bpf_iter_attach_map,
+	.detach_target		= bpf_iter_detach_map,
 	.ctx_arg_info_size	= 2,
 	.ctx_arg_info		= {
 		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
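The new attach hook moves target resolution into link creation: userspace passes the map fd in union bpf_iter_link_info instead of relying on a required link-info flag. A minimal userspace sketch of the resulting flow, assuming a v5.9-era libbpf that has bpf_iter_attach_opts; the function and variable names here are illustrative, not from this patch:

#include <linux/bpf.h>
#include <bpf/libbpf.h>

/* Attach an iterator program to one bpf_sk_storage map. */
static struct bpf_link *attach_map_iter(struct bpf_program *prog, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};

	linfo.map.map_fd = map_fd;	/* consumed by bpf_iter_attach_map() */
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	/* The kernel side returns -EBADF for a zero fd, -EINVAL for a map
	 * that is not BPF_MAP_TYPE_SK_STORAGE, and -EACCES if the program
	 * reads beyond the map's value_size. */
	return bpf_program__attach_iter(prog, &opts);
}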
@@ -757,11 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 		return err;
 	}
 
-	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
-	cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
-	if (cpumask_empty(mask)) {
-		free_cpumask_var(mask);
-		return -EINVAL;
+	if (!cpumask_empty(mask)) {
+		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+		if (cpumask_empty(mask)) {
+			free_cpumask_var(mask);
+			return -EINVAL;
+		}
 	}
 
 	map = kzalloc(max_t(unsigned int,
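With the check reordered, an all-zero mask is accepted again and simply disables RPS on the queue; only a non-empty mask is intersected with the housekeeping CPUs. A hedged userspace sketch of the now-working write (the device and queue path are examples):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* "0" parses to an empty cpumask; before this fix the housekeeping
	 * intersection turned that into -EINVAL. */
	int fd = open("/sys/class/net/eth0/queues/rx-0/rps_cpus", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = write(fd, "0", 1);	/* expected to succeed and clear the map */
	close(fd);
	return n == 1 ? 0 : 1;
}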
@@ -4853,7 +4853,7 @@ static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
 	if (err < 0)
 		goto out;
 
-	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+	if (ip_is_fragment(ip_hdr(skb)))
 		fragment = true;
 
 	off = ip_hdrlen(skb);
@@ -3414,6 +3414,16 @@ static void sock_inuse_add(struct net *net, int val)
 }
 #endif
 
+static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
+{
+	if (!twsk_prot)
+		return;
+	kfree(twsk_prot->twsk_slab_name);
+	twsk_prot->twsk_slab_name = NULL;
+	kmem_cache_destroy(twsk_prot->twsk_slab);
+	twsk_prot->twsk_slab = NULL;
+}
+
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
 {
 	if (!rsk_prot)

@@ -3484,7 +3494,7 @@ int proto_register(struct proto *prot, int alloc_slab)
 						  prot->slab_flags,
 						  NULL);
 			if (prot->twsk_prot->twsk_slab == NULL)
-				goto out_free_timewait_sock_slab_name;
+				goto out_free_timewait_sock_slab;
 		}
 	}
 
@@ -3492,15 +3502,15 @@ int proto_register(struct proto *prot, int alloc_slab)
 	ret = assign_proto_idx(prot);
 	if (ret) {
 		mutex_unlock(&proto_list_mutex);
-		goto out_free_timewait_sock_slab_name;
+		goto out_free_timewait_sock_slab;
 	}
 	list_add(&prot->node, &proto_list);
 	mutex_unlock(&proto_list_mutex);
 	return ret;
 
-out_free_timewait_sock_slab_name:
+out_free_timewait_sock_slab:
 	if (alloc_slab && prot->twsk_prot)
-		kfree(prot->twsk_prot->twsk_slab_name);
+		tw_prot_cleanup(prot->twsk_prot);
 out_free_request_sock_slab:
 	if (alloc_slab) {
 		req_prot_cleanup(prot->rsk_prot);

@@ -3524,12 +3534,7 @@ void proto_unregister(struct proto *prot)
 	prot->slab = NULL;
 
 	req_prot_cleanup(prot->rsk_prot);
-
-	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
-		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
-		kfree(prot->twsk_prot->twsk_slab_name);
-		prot->twsk_prot->twsk_slab = NULL;
-	}
+	tw_prot_cleanup(prot->twsk_prot);
 }
 EXPORT_SYMBOL(proto_unregister);
@@ -57,18 +57,16 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
 	return bpfilter_mbox_request(sk, optname, optval, optlen, true);
 }
 
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname,
-			    char __user *user_optval, int __user *optlen)
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+			    int __user *optlen)
 {
-	sockptr_t optval;
-	int err, len;
+	int len;
 
 	if (get_user(len, optlen))
 		return -EFAULT;
-	err = init_user_sockptr(&optval, user_optval, len);
-	if (err)
-		return err;
-	return bpfilter_mbox_request(sk, optname, optval, len, false);
+
+	return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len,
+				     false);
 }
 
 static int __init bpfilter_sockopt_init(void)
@@ -296,55 +296,12 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 				    ipv6_only_sock(sk), true, false);
 }
 
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- * We try to allocate an odd port (and leave even ports for connect())
- */
-int inet_csk_get_port(struct sock *sk, unsigned short snum)
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+			       struct sock *sk)
 {
-	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
-	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
-	int ret = 1, port = snum;
-	struct inet_bind_hashbucket *head;
-	struct net *net = sock_net(sk);
-	struct inet_bind_bucket *tb = NULL;
 	kuid_t uid = sock_i_uid(sk);
-	int l3mdev;
-
-	l3mdev = inet_sk_bound_l3mdev(sk);
+	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
 
-	if (!port) {
-		head = inet_csk_find_open_port(sk, &tb, &port);
-		if (!head)
-			return ret;
-		if (!tb)
-			goto tb_not_found;
-		goto success;
-	}
-	head = &hinfo->bhash[inet_bhashfn(net, port,
-					  hinfo->bhash_size)];
-	spin_lock_bh(&head->lock);
-	inet_bind_bucket_for_each(tb, &head->chain)
-		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
-		    tb->port == port)
-			goto tb_found;
-tb_not_found:
-	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
-				     net, head, port, l3mdev);
-	if (!tb)
-		goto fail_unlock;
-tb_found:
-	if (!hlist_empty(&tb->owners)) {
-		if (sk->sk_reuse == SK_FORCE_REUSE)
-			goto success;
-
-		if ((tb->fastreuse > 0 && reuse) ||
-		    sk_reuseport_match(tb, sk))
-			goto success;
-		if (inet_csk_bind_conflict(sk, tb, true, true))
-			goto fail_unlock;
-	}
-success:
 	if (hlist_empty(&tb->owners)) {
 		tb->fastreuse = reuse;
 		if (sk->sk_reuseport) {

@@ -388,6 +345,58 @@ success:
 			tb->fastreuseport = 0;
 		}
 	}
+}
+
+/* Obtain a reference to a local port for the given sock,
+ * if snum is zero it means select any available local port.
+ * We try to allocate an odd port (and leave even ports for connect())
+ */
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
+{
+	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
+	int ret = 1, port = snum;
+	struct inet_bind_hashbucket *head;
+	struct net *net = sock_net(sk);
+	struct inet_bind_bucket *tb = NULL;
+	int l3mdev;
+
+	l3mdev = inet_sk_bound_l3mdev(sk);
+
+	if (!port) {
+		head = inet_csk_find_open_port(sk, &tb, &port);
+		if (!head)
+			return ret;
+		if (!tb)
+			goto tb_not_found;
+		goto success;
+	}
+	head = &hinfo->bhash[inet_bhashfn(net, port,
+					  hinfo->bhash_size)];
+	spin_lock_bh(&head->lock);
+	inet_bind_bucket_for_each(tb, &head->chain)
+		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+		    tb->port == port)
+			goto tb_found;
+tb_not_found:
+	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
+				     net, head, port, l3mdev);
+	if (!tb)
+		goto fail_unlock;
+tb_found:
+	if (!hlist_empty(&tb->owners)) {
+		if (sk->sk_reuse == SK_FORCE_REUSE)
+			goto success;
+
+		if ((tb->fastreuse > 0 && reuse) ||
+		    sk_reuseport_match(tb, sk))
+			goto success;
+		if (inet_csk_bind_conflict(sk, tb, true, true))
+			goto fail_unlock;
+	}
+success:
+	inet_csk_update_fastreuse(tb, sk);
+
 	if (!inet_csk(sk)->icsk_bind_hash)
 		inet_bind_hash(sk, tb, port);
 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
@@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 				return -ENOMEM;
 			}
 		}
+		inet_csk_update_fastreuse(tb, child);
 	}
 	inet_bind_hash(child, tb, port);
 	spin_unlock(&head->lock);
@@ -301,24 +301,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
 	struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
 					    2 * TCP_FASTOPEN_KEY_MAX) +
 					    (TCP_FASTOPEN_KEY_MAX * 5)) };
-	struct tcp_fastopen_context *ctx;
-	u32 user_key[TCP_FASTOPEN_KEY_MAX * 4];
-	__le32 key[TCP_FASTOPEN_KEY_MAX * 4];
+	u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
+	__le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
 	char *backup_data;
-	int ret, i = 0, off = 0, n_keys = 0;
+	int ret, i = 0, off = 0, n_keys;
 
 	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
 	if (!tbl.data)
 		return -ENOMEM;
 
-	rcu_read_lock();
-	ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
-	if (ctx) {
-		n_keys = tcp_fastopen_context_len(ctx);
-		memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys);
-	}
-	rcu_read_unlock();
-
+	n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
 	if (!n_keys) {
 		memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
 		n_keys = 1;
@@ -3685,22 +3685,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		return 0;
 
 	case TCP_FASTOPEN_KEY: {
-		__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
-		struct tcp_fastopen_context *ctx;
-		unsigned int key_len = 0;
+		u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
+		unsigned int key_len;
 
 		if (get_user(len, optlen))
 			return -EFAULT;
 
-		rcu_read_lock();
-		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
-		if (ctx) {
-			key_len = tcp_fastopen_context_len(ctx) *
-				  TCP_FASTOPEN_KEY_LENGTH;
-			memcpy(&key[0], &ctx->key[0], key_len);
-		}
-		rcu_read_unlock();
-
+		key_len = tcp_fastopen_get_cipher(net, icsk, key) *
+				TCP_FASTOPEN_KEY_LENGTH;
 		len = min_t(unsigned int, len, key_len);
 		if (put_user(len, optlen))
 			return -EFAULT;
@@ -108,6 +108,29 @@ out:
 	return err;
 }
 
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+			    u64 *key)
+{
+	struct tcp_fastopen_context *ctx;
+	int n_keys = 0, i;
+
+	rcu_read_lock();
+	if (icsk)
+		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
+	else
+		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+	if (ctx) {
+		n_keys = tcp_fastopen_context_len(ctx);
+		for (i = 0; i < n_keys; i++) {
+			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
+			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
+		}
+	}
+	rcu_read_unlock();
+
+	return n_keys;
+}
+
 static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
 					     struct sk_buff *syn,
 					     const siphash_key_t *key,
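tcp_fastopen_get_cipher() now serializes each key half with put_unaligned_le64(), so both readers above report identical bytes on big and little endian hosts. A standalone sketch of the byte-order property the fix relies on (illustrative userspace code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Mirror of a little-endian store: byte 0 is the least significant. */
static void put_le64(uint64_t v, uint8_t *out)
{
	for (int i = 0; i < 8; i++)
		out[i] = v >> (8 * i);
}

int main(void)
{
	uint8_t buf[8];
	int i;

	put_le64(0x0123456789abcdefULL, buf);
	/* Prints "ef cd ab 89 67 45 23 01" regardless of host endianness. */
	for (i = 0; i < 8; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}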
@@ -423,12 +423,12 @@ static void mptcp_sock_destruct(struct sock *sk)
 	 * also remove the mptcp socket, via
 	 * sock_put(ctx->conn).
 	 *
-	 * Problem is that the mptcp socket will not be in
-	 * SYN_RECV state and doesn't have SOCK_DEAD flag.
+	 * Problem is that the mptcp socket will be in
+	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
 	 * Both result in warnings from inet_sock_destruct.
 	 */
 
-	if (sk->sk_state == TCP_SYN_RECV) {
+	if (sk->sk_state == TCP_ESTABLISHED) {
 		sk->sk_state = TCP_CLOSE;
 		WARN_ON_ONCE(sk->sk_socket);
 		sock_orphan(sk);
@@ -328,10 +328,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
 	if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
 		return -ESOCKTNOSUPPORT;
 
-	if (sock->type == SOCK_RAW)
+	if (sock->type == SOCK_RAW) {
+		if (!capable(CAP_NET_RAW))
+			return -EPERM;
 		sock->ops = &rawsock_raw_ops;
-	else
+	} else {
 		sock->ops = &rawsock_ops;
+	}
 
 	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
 	if (!sk)
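After this change, creating a raw NFC socket requires CAP_NET_RAW, matching other raw socket families. A hedged userspace probe (NFC_SOCKPROTO_RAW comes from <linux/nfc.h>; AF_NFC is 39 in <linux/socket.h>; error handling trimmed):

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/nfc.h>

#ifndef AF_NFC
#define AF_NFC 39	/* from <linux/socket.h> */
#endif

int main(void)
{
	int fd = socket(AF_NFC, SOCK_RAW, NFC_SOCKPROTO_RAW);

	/* Unprivileged callers should now see EPERM instead of a socket. */
	if (fd < 0 && errno == EPERM)
		printf("raw NFC socket denied without CAP_NET_RAW\n");
	return 0;
}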
@@ -1756,6 +1756,7 @@ err:
 /* Called with ovs_mutex. */
 static void __dp_destroy(struct datapath *dp)
 {
+	struct flow_table *table = &dp->table;
 	int i;
 
 	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {

@@ -1774,7 +1775,14 @@ static void __dp_destroy(struct datapath *dp)
 	 */
 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
-	/* RCU destroy the flow table */
+	/* Flush sw_flow in the tables. RCU cb only releases resource
+	 * such as dp, ports and tables. That may avoid some issues
+	 * such as RCU usage warning.
+	 */
+	table_instance_flow_flush(table, ovsl_dereference(table->ti),
+				  ovsl_dereference(table->ufid_ti));
+
+	/* RCU destroy the ports, meters and flow tables. */
 	call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
@@ -473,19 +473,15 @@ static void table_instance_flow_free(struct flow_table *table,
 		flow_mask_remove(table, flow->mask);
 }
 
-static void table_instance_destroy(struct flow_table *table,
-				   struct table_instance *ti,
-				   struct table_instance *ufid_ti,
-				   bool deferred)
+/* Must be called with OVS mutex held. */
+void table_instance_flow_flush(struct flow_table *table,
+			       struct table_instance *ti,
+			       struct table_instance *ufid_ti)
 {
 	int i;
 
 	if (!ti)
 		return;
 
 	BUG_ON(!ufid_ti);
 	if (ti->keep_flows)
-		goto skip_flows;
+		return;
 
 	for (i = 0; i < ti->n_buckets; i++) {
 		struct sw_flow *flow;

@@ -497,18 +493,16 @@ static void table_instance_destroy(struct flow_table *table,
 
 			table_instance_flow_free(table, ti, ufid_ti,
 						 flow, false);
-			ovs_flow_free(flow, deferred);
+			ovs_flow_free(flow, true);
 		}
 	}
+}
 
-skip_flows:
-	if (deferred) {
-		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
-		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
-	} else {
-		__table_instance_destroy(ti);
-		__table_instance_destroy(ufid_ti);
-	}
+static void table_instance_destroy(struct table_instance *ti,
+				   struct table_instance *ufid_ti)
+{
+	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
 }
 
 /* No need for locking this function is called from RCU callback or

@@ -523,7 +517,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 
 	call_rcu(&mc->rcu, mask_cache_rcu_cb);
 	call_rcu(&ma->rcu, mask_array_rcu_cb);
-	table_instance_destroy(table, ti, ufid_ti, false);
+	table_instance_destroy(ti, ufid_ti);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,

@@ -641,7 +635,8 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
 	flow_table->count = 0;
 	flow_table->ufid_count = 0;
 
-	table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
+	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
+	table_instance_destroy(old_ti, old_ufid_ti);
 	return 0;
 
 err_free_ti:

@@ -105,5 +105,8 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		       bool full, const struct sw_flow_mask *mask);
 
 void ovs_flow_masks_rebalance(struct flow_table *table);
+void table_instance_flow_flush(struct flow_table *table,
+			       struct table_instance *ti,
+			       struct table_instance *ufid_ti);
 
 #endif /* flow_table.h */
@@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 }
 
 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+	__releases(&pkc->blk_fill_in_prog_lock)
 {
 	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
 

@@ -989,6 +990,7 @@ static void prb_fill_curr_block(char *curr,
 				struct tpacket_kbdq_core *pkc,
 				struct tpacket_block_desc *pbd,
 				unsigned int len)
+	__acquires(&pkc->blk_fill_in_prog_lock)
 {
 	struct tpacket3_hdr *ppd;
 

@@ -2286,8 +2288,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (do_vnet &&
 	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
 				    sizeof(struct virtio_net_hdr),
-				    vio_le(), true, 0))
+				    vio_le(), true, 0)) {
+		if (po->tp_version == TPACKET_V3)
+			prb_clear_blk_fill_status(&po->rx_ring);
 		goto drop_n_account;
+	}
 
 	if (po->tp_version <= TPACKET_V2) {
 		packet_increment_rx_head(po, &po->rx_ring);

@@ -2393,7 +2398,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
 		spin_unlock(&sk->sk_receive_queue.lock);
 		sk->sk_data_ready(sk);
-	} else {
+	} else if (po->tp_version == TPACKET_V3) {
 		prb_clear_blk_fill_status(&po->rx_ring);
 	}
 
net/socket.c
@@ -500,7 +500,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
 	if (f.file) {
 		sock = sock_from_file(f.file, err);
 		if (likely(sock)) {
-			*fput_needed = f.flags;
+			*fput_needed = f.flags & FDPUT_FPUT;
 			return sock;
 		}
 		fdput(f);
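The masking matters because fdget() can also set FDPUT_POS_UNLOCK in f.flags, and callers hand *fput_needed to fput_light(), which treats any non-zero cookie as "drop a reference". For context, fput_light() in <linux/file.h> is effectively:

/* From <linux/file.h>; shown for illustration. Leaking FDPUT_POS_UNLOCK
 * into *fput_needed therefore forced a bogus extra fput(). */
static inline void fput_light(struct file *file, int fput_needed)
{
	if (fput_needed)
		fput(file);
}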
@@ -1325,7 +1325,7 @@ int sock_wake_async(struct socket_wq *wq, int how, int band)
 	case SOCK_WAKE_SPACE:
 		if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
 			break;
-		/* fall through */
+		fallthrough;
 	case SOCK_WAKE_IO:
 call_kill:
 		kill_fasync(&wq->fasync_list, SIGIO, band);

@@ -1804,8 +1804,7 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
 		ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
 					 upeer_addrlen, flags,
 					 rlimit(RLIMIT_NOFILE));
-		if (f.flags)
-			fput(f.file);
+		fdput(f);
 	}
 
 	return ret;

@@ -1868,8 +1867,7 @@ int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
 		ret = move_addr_to_kernel(uservaddr, addrlen, &address);
 		if (!ret)
 			ret = __sys_connect_file(f.file, &address, addrlen, 0);
-		if (f.flags)
-			fput(f.file);
+		fdput(f);
 	}
 
 	return ret;

@@ -2097,7 +2095,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock)
 int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
 		int optlen)
 {
-	sockptr_t optval;
+	sockptr_t optval = USER_SOCKPTR(user_optval);
 	char *kernel_optval = NULL;
 	int err, fput_needed;
 	struct socket *sock;

@@ -2105,10 +2103,6 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
 	if (optlen < 0)
 		return -EINVAL;
 
-	err = init_user_sockptr(&optval, user_optval, optlen);
-	if (err)
-		return err;
-
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (!sock)
 		return err;

@@ -3065,7 +3059,7 @@ static int __init sock_init(void)
 
 	err = register_filesystem(&sock_fs_type);
 	if (err)
-		goto out_fs;
+		goto out;
 	sock_mnt = kern_mount(&sock_fs_type);
 	if (IS_ERR(sock_mnt)) {
 		err = PTR_ERR(sock_mnt);

@@ -3088,7 +3082,6 @@ out:
 
 out_mount:
 	unregister_filesystem(&sock_fs_type);
-out_fs:
 	goto out;
 }
 

@@ -3161,13 +3154,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 		if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
 			return -ENOMEM;
 		buf_size += rule_cnt * sizeof(u32);
-		/* fall through */
+		fallthrough;
 	case ETHTOOL_GRXRINGS:
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_SRXCLSRLINS:
 		convert_out = true;
-		/* fall through */
+		fallthrough;
 	case ETHTOOL_SRXCLSRLDEL:
 		buf_size += sizeof(struct ethtool_rxnfc);
 		convert_in = true;
@@ -561,7 +561,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct iov_iter msg_iter;
-	char *kaddr = kmap(page);
+	char *kaddr;
 	struct kvec iov;
 	int rc;
 

@@ -576,6 +576,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 		goto out;
 	}
 
+	kaddr = kmap(page);
 	iov.iov_base = kaddr + offset;
 	iov.iov_len = size;
 	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
@@ -935,7 +935,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int ret = 0;
 	int pending;
 
-	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
+	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+			       MSG_CMSG_COMPAT))
 		return -EOPNOTSUPP;
 
 	mutex_lock(&tls_ctx->tx_lock);
@@ -1032,7 +1032,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
 	}
 
 	/* Connected sockets that can produce data can be written. */
-	if (sk->sk_state == TCP_ESTABLISHED) {
+	if (transport && sk->sk_state == TCP_ESTABLISHED) {
 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 			bool space_avail_now = false;
 			int ret = transport->notify_poll_out(