Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree:

1) Skip ip_sabotage_in() for packets making it into the VRF driver,
   otherwise packets are dropped, from David Ahern.

2) Fix a Clang compilation warning that uncovered a typo in the
   nft_validate_register_store() call from nft_osf, from Stefan Agner.

3) Fix a double sizeof in the netlink message length calculations in
   ctnetlink, from zhong jiang.

4) Add a missing rb_erase() on batch full in the rbtree garbage
   collector, from Taehee Yoo.

5) Calm down a compilation warning in nf_hook(), from Florian Westphal.

6) Add a missing check for a non-null sk in xt_socket before validating
   netns precedence, from Flavio Leitner.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
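For reference, a minimal userspace sketch of the pitfall behind fix 3 (this is not kernel code; the struct below is a hypothetical stand-in for struct nf_ct_tcp_flags): sizeof(sizeof(x)) measures a size_t rather than x itself, so the attribute length in TCP_NLATTR_SIZE was computed from the wrong type.

#include <stdio.h>

/* Hypothetical stand-in for struct nf_ct_tcp_flags (two u8 fields). */
struct tcp_flags_example {
	unsigned char flags;
	unsigned char mask;
};

int main(void)
{
	/* Buggy form: the inner sizeof yields a size_t, so the outer
	 * sizeof measures size_t (typically 8 bytes on 64-bit). */
	printf("sizeof(sizeof(struct)) = %zu\n",
	       sizeof(sizeof(struct tcp_flags_example)));

	/* Fixed form: measures the struct itself. */
	printf("sizeof(struct)         = %zu\n",
	       sizeof(struct tcp_flags_example));
	return 0;
}

Compiled with any C compiler, the first line prints the size of size_t while the second prints 2 on typical ABIs, which is why the extra sizeof had to be dropped from the macro.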
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
 		break;
 	case NFPROTO_ARP:
 #ifdef CONFIG_NETFILTER_FAMILY_ARP
+		if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+			break;
 		hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
 #endif
 		break;
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
 				   struct sk_buff *skb,
 				   const struct nf_hook_state *state)
 {
-	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+	    !netif_is_l3_master(skb->dev)) {
 		state->okfn(state->net, state->sk, skb);
 		return NF_STOLEN;
 	}
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 #define TCP_NLATTR_SIZE	( \
 	NLA_ALIGN(NLA_HDRLEN + 1) + \
 	NLA_ALIGN(NLA_HDRLEN + 1) + \
-	NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
-	NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
 
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 
 	priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
 	err = nft_validate_register_store(ctx, priv->dreg, NULL,
-					  NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+					  NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
 	if (err < 0)
 		return err;
 
@@ -355,12 +355,11 @@ cont:
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
+	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
 	struct nft_set_gc_batch *gcb = NULL;
-	struct rb_node *node, *prev = NULL;
-	struct nft_rbtree_elem *rbe;
 	struct nft_rbtree *priv;
+	struct rb_node *node;
 	struct nft_set *set;
-	int i;
 
 	priv = container_of(work, struct nft_rbtree, gc_work.work);
 	set = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
 		if (nft_rbtree_interval_end(rbe)) {
-			prev = node;
+			rbe_end = rbe;
 			continue;
 		}
 		if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
 		if (nft_set_elem_mark_busy(&rbe->ext))
 			continue;
 
+		if (rbe_prev) {
+			rb_erase(&rbe_prev->node, &priv->root);
+			rbe_prev = NULL;
+		}
 		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
 		if (!gcb)
 			break;
 
 		atomic_dec(&set->nelems);
 		nft_set_gc_batch_add(gcb, rbe);
+		rbe_prev = rbe;
 
-		if (prev) {
-			rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+		if (rbe_end) {
 			atomic_dec(&set->nelems);
-			nft_set_gc_batch_add(gcb, rbe);
-			prev = NULL;
+			nft_set_gc_batch_add(gcb, rbe_end);
+			rb_erase(&rbe_end->node, &priv->root);
+			rbe_end = NULL;
 		}
 		node = rb_next(node);
 		if (!node)
 			break;
 	}
-	if (gcb) {
-		for (i = 0; i < gcb->head.cnt; i++) {
-			rbe = gcb->elems[i];
-			rb_erase(&rbe->node, &priv->root);
-		}
-	}
+	if (rbe_prev)
+		rb_erase(&rbe_prev->node, &priv->root);
 	write_seqcount_end(&priv->count);
 	write_unlock_bh(&priv->lock);
 
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;
 
-	if (!net_eq(xt_net(par), sock_net(sk)))
+	if (sk && !net_eq(xt_net(par), sock_net(sk)))
 		sk = NULL;
 
 	if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
 	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;
 
-	if (!net_eq(xt_net(par), sock_net(sk)))
+	if (sk && !net_eq(xt_net(par), sock_net(sk)))
 		sk = NULL;
 
 	if (!sk)