Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree:

1) Reject non-null terminated helper names from xt_CT, from Gao Feng.
2) Fix KASAN splat due to out-of-bounds access in the commit phase, from Alexey Kodanev.
3) Missing conntrack hook registration on IPVS FTP helper, from Julian Anastasov.
4) Incorrect skbuff allocation size in bridge nft_reject, from Taehee Yoo.
5) Fix inverted check on packet xmit to non-local addresses, also from Julian.
6) Fix ebtables compat alignment problems, from Alin Nastac.
7) Hook mask checks are not correct in xt_set, from Serhey Popovych.
8) Fix timeout listing of elements in ipsets, from Jozsef.
9) Cap maximum timeout value in ipset, also from Jozsef.
10) Don't allow the family option for hash:mac sets, from Florent Fourcot.
11) Restrict ebtables to work with NFPROTO_BRIDGE targets only, from Florian Westphal.
12) Another bug reported by KASAN in the rbtree set backend, from Taehee Yoo.
13) Fix __IPS_MAX_BIT, which was not updated to include IPS_OFFLOAD_BIT, from Gao Feng.
14) Missing initialization of the match/target check parameter structs in ebtables, from Florian Westphal.
15) Remove useless nft_dup.h file from the include path, from C. Labbe.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -411,6 +411,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
 	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
 	if (IS_ERR(watcher))
 		return PTR_ERR(watcher);
+
+	if (watcher->family != NFPROTO_BRIDGE) {
+		module_put(watcher->me);
+		return -ENOENT;
+	}
+
 	w->u.watcher = watcher;
 
 	par->target = watcher;
@@ -709,6 +715,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
 	}
 	i = 0;
 
+	memset(&mtpar, 0, sizeof(mtpar));
+	memset(&tgpar, 0, sizeof(tgpar));
 	mtpar.net = tgpar.net = net;
 	mtpar.table = tgpar.table = name;
 	mtpar.entryinfo = tgpar.entryinfo = e;
@@ -730,6 +738,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
 		goto cleanup_watchers;
 	}
 
+	/* Reject UNSPEC, xtables verdicts/return values are incompatible */
+	if (target->family != NFPROTO_BRIDGE) {
+		module_put(target->me);
+		ret = -ENOENT;
+		goto cleanup_watchers;
+	}
+
 	t->u.target = target;
 	if (t->u.target == &ebt_standard_target) {
 		if (gap < sizeof(struct ebt_standard_target)) {
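Editor's note: the new /* Reject UNSPEC ... */ check in the two ebtables hunks above exists because ebtables and xtables do not share a verdict numbering, so an extension registered for a non-bridge family would return values the bridge traverser misreads. A minimal userspace sketch of the clash (the verdict constants mirror the uapi headers; the "chain jump" remark simplifies ebt_do_table(), and nothing here is kernel code):

#include <stdio.h>

/* xtables/netfilter verdicts (include/uapi/linux/netfilter.h) */
#define NF_DROP   0
#define NF_ACCEPT 1

/* ebtables verdicts (include/uapi/linux/netfilter_bridge/ebtables.h) */
#define EBT_ACCEPT   -1
#define EBT_DROP     -2
#define EBT_CONTINUE -3
#define EBT_RETURN   -4

int main(void)
{
	/* An x_tables target returning NF_ACCEPT (1) or NF_DROP (0) would be
	 * read by ebtables as a jump to user-defined chain 1 or 0, since only
	 * negative values are verdicts there - hence the family check. */
	printf("NF_ACCEPT=%d  vs  EBT_ACCEPT=%d\n", NF_ACCEPT, EBT_ACCEPT);
	printf("NF_DROP=%d    vs  EBT_DROP=%d\n", NF_DROP, EBT_DROP);
	return 0;
}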
@@ -1605,16 +1620,16 @@ struct compat_ebt_entry_mwt {
 		compat_uptr_t ptr;
 	} u;
 	compat_uint_t match_size;
-	compat_uint_t data[0];
+	compat_uint_t data[0] __attribute__ ((aligned (__alignof__(struct compat_ebt_replace))));
 };
 
 /* account for possible padding between match_size and ->data */
 static int ebt_compat_entry_padsize(void)
 {
-	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
-		     COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
-	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
-		     COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
+	BUILD_BUG_ON(sizeof(struct ebt_entry_match) <
+		     sizeof(struct compat_ebt_entry_mwt));
+	return (int) sizeof(struct ebt_entry_match) -
+		     sizeof(struct compat_ebt_entry_mwt);
 }
 
 static int ebt_compat_match_offset(const struct xt_match *match,
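Editor's note: the compat hunk above is easier to follow with a toy program: giving the flexible data[] member a larger alignment adds tail padding, so sizeof() of the compat struct reflects the same padding the native layout already has, and ebt_compat_entry_padsize() can then be a plain sizeof difference. The structs below are simplified stand-ins (GNU C zero-length arrays, as in the kernel), not the real definitions:

#include <stdint.h>
#include <stdio.h>

struct mwt_packed {                 /* data[] only 4-byte aligned        */
	char name[32];
	uint32_t match_size;
	uint32_t data[0];
};

struct mwt_aligned {                /* the fix: force 8-byte alignment   */
	char name[32];
	uint32_t match_size;
	uint32_t data[0] __attribute__((aligned(8)));
};

int main(void)
{
	printf("sizeof packed  = %zu (data[] at offset 36)\n",
	       sizeof(struct mwt_packed));
	printf("sizeof aligned = %zu (data[] pushed to offset 40)\n",
	       sizeof(struct mwt_aligned));
	return 0;
}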
@@ -261,7 +261,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
|
||||
if (!reject6_br_csum_ok(oldskb, hook))
|
||||
return;
|
||||
|
||||
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
|
||||
nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
|
||||
LL_MAX_HEADER + len, GFP_ATOMIC);
|
||||
if (!nskb)
|
||||
return;
|
||||
|
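Editor's note: the one-word nft_reject_bridge change above matters because the fixed IPv6 header is 40 bytes while the IPv4 header is 20, so sizing the ICMPv6 reply skb by struct iphdr under-reserves headroom by 20 bytes. A userspace illustration using the libc header structs as stand-ins for the kernel's struct iphdr/ipv6hdr:

#include <stdio.h>
#include <netinet/ip.h>    /* struct ip      (IPv4 header, 20 bytes) */
#include <netinet/ip6.h>   /* struct ip6_hdr (IPv6 header, 40 bytes) */
#include <netinet/icmp6.h> /* struct icmp6_hdr                       */

int main(void)
{
	size_t wrong = sizeof(struct ip)      + sizeof(struct icmp6_hdr);
	size_t right = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr);

	printf("IPv4-sized reservation: %zu bytes\n", wrong);
	printf("IPv6-sized reservation: %zu bytes\n", right);
	printf("shortfall written past the reservation: %zu bytes\n",
	       right - wrong);
	return 0;
}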
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -531,6 +531,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
 		return -ENOMEM;
 
 	j = 0;
+	memset(&mtpar, 0, sizeof(mtpar));
 	mtpar.net = net;
 	mtpar.table = name;
 	mtpar.entryinfo = &e->ip;
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -550,6 +550,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
 		return -ENOMEM;
 
 	j = 0;
+	memset(&mtpar, 0, sizeof(mtpar));
 	mtpar.net = net;
 	mtpar.table = name;
 	mtpar.entryinfo = &e->ipv6;
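Editor's note: the memset() additions here and in the ebtables hunk above follow one pattern: the check parameter struct lives on the stack and only a few fields are assigned, so every other field would reach the extensions' checkentry hooks holding stale stack data unless the struct is zeroed first. A userspace sketch of the pattern (struct and field names are illustrative, not the kernel's xt_mtchk_param):

#include <stdio.h>
#include <string.h>

struct check_param {              /* stand-in for struct xt_mtchk_param */
	const char *table;
	const void *entryinfo;
	unsigned int hook_mask;
	unsigned char nft_compat; /* field the callers never set explicitly */
};

static void checkentry(const struct check_param *par)
{
	/* An extension consulting this flag must see 0 on the legacy path;
	 * without the memset it would read whatever was left on the stack. */
	printf("nft_compat=%d\n", par->nft_compat);
}

int main(void)
{
	struct check_param par;

	memset(&par, 0, sizeof(par));  /* the fix: zero everything first */
	par.table = "filter";
	par.hook_mask = 1u << 0;
	checkentry(&par);
	return 0;
}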
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -1234,7 +1234,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
 	pr_debug("Create set %s with family %s\n",
 		 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
 
-#ifndef IP_SET_PROTO_UNDEF
+#ifdef IP_SET_PROTO_UNDEF
+	if (set->family != NFPROTO_UNSPEC)
+		return -IPSET_ERR_INVALID_FAMILY;
+#else
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
 #endif
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -839,6 +839,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 		 *    For now only for NAT!
 		 */
 		ip_vs_rs_hash(ipvs, dest);
+		/* FTP-NAT requires conntrack for mangling */
+		if (svc->port == FTPPORT)
+			ip_vs_register_conntrack(svc);
 	}
 	atomic_set(&dest->conn_flags, conn_flags);
 
@@ -1462,6 +1465,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
  */
 static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup)
 {
+	ip_vs_unregister_conntrack(svc);
 	/* Hold svc to avoid double release from dest_trash */
 	atomic_inc(&svc->refcnt);
 	/*
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -168,7 +168,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
 						bool new_rt_is_local)
 {
 	bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
-	bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
+	bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
 	bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
 	bool source_is_loopback;
 	bool old_rt_is_local;
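Editor's note: the ip_vs_xmit fix above is a single wrong bit test; with the old code, "allow non-local" simply mirrored "allow local" instead of tracking its own flag. A self-contained demo of the !!(mode & FLAG) pattern (flag values are illustrative, not the kernel's IP_VS_RT_MODE_* definitions):

#include <stdio.h>

#define RT_MODE_LOCAL     0x0001
#define RT_MODE_NON_LOCAL 0x0002

int main(void)
{
	int rt_mode = RT_MODE_NON_LOCAL;              /* non-local only      */

	int buggy = !!(rt_mode & RT_MODE_LOCAL);      /* old code: wrong bit */
	int fixed = !!(rt_mode & RT_MODE_NON_LOCAL);  /* patched code        */

	printf("allow_non_local buggy=%d fixed=%d\n", buggy, fixed);
	return 0;
}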
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2890,12 +2890,13 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net,
 	u32 id = ntohl(nla_get_be32(nla));
 
 	list_for_each_entry(trans, &net->nft.commit_list, list) {
-		struct nft_set *set = nft_trans_set(trans);
+		if (trans->msg_type == NFT_MSG_NEWSET) {
+			struct nft_set *set = nft_trans_set(trans);
 
-		if (trans->msg_type == NFT_MSG_NEWSET &&
-		    id == nft_trans_set_id(trans) &&
-		    nft_active_genmask(set, genmask))
-			return set;
+			if (id == nft_trans_set_id(trans) &&
+			    nft_active_genmask(set, genmask))
+				return set;
+		}
 	}
 	return ERR_PTR(-ENOENT);
 }
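Editor's note: the nf_tables_api fix above reorders the checks so the transaction type is inspected before the transaction payload is interpreted as a set; for non-NEWSET transactions that payload is something else (and smaller), which is what KASAN caught. A toy tagged-payload sketch of the same idea (layout is illustrative, not struct nft_trans):

#include <stdio.h>

struct set  { unsigned int id; };
struct rule { unsigned int handle; };

struct trans {                       /* stand-in for struct nft_trans */
	int is_newset;               /* stand-in for trans->msg_type   */
	union {
		struct set  *set;    /* valid only when is_newset      */
		struct rule *rule;
	} payload;
};

static struct set *lookup_set(struct trans *t, unsigned int id)
{
	if (!t->is_newset)           /* the fix: check the type first  */
		return NULL;
	return t->payload.set->id == id ? t->payload.set : NULL;
}

int main(void)
{
	struct set s = { .id = 7 };
	struct trans t = { .is_newset = 1, .payload.set = &s };

	printf("found: %s\n", lookup_set(&t, 7) ? "yes" : "no");
	return 0;
}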
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -66,7 +66,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 			parent = rcu_dereference_raw(parent->rb_left);
 			if (interval &&
 			    nft_rbtree_equal(set, this, interval) &&
-			    nft_rbtree_interval_end(this) &&
+			    nft_rbtree_interval_end(rbe) &&
 			    !nft_rbtree_interval_end(interval))
 				continue;
 			interval = rbe;
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -245,12 +245,22 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 	}
 
 	if (info->helper[0]) {
+		if (strnlen(info->helper, sizeof(info->helper)) == sizeof(info->helper)) {
+			ret = -ENAMETOOLONG;
+			goto err3;
+		}
+
 		ret = xt_ct_set_helper(ct, info->helper, par);
 		if (ret < 0)
 			goto err3;
 	}
 
 	if (info->timeout[0]) {
+		if (strnlen(info->timeout, sizeof(info->timeout)) == sizeof(info->timeout)) {
+			ret = -ENAMETOOLONG;
+			goto err4;
+		}
+
 		ret = xt_ct_set_timeout(ct, par, info->timeout);
 		if (ret < 0)
 			goto err4;
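Editor's note: the xt_CT checks above rely on a simple invariant: a fixed-size name copied from userspace is only safe to treat as a C string if strnlen() finds a NUL before the end of the buffer. A userspace sketch (buffer size is illustrative, not the xt_ct_target_info layout):

#include <stdio.h>
#include <string.h>

#define NAME_LEN 16   /* illustrative size only */

static int name_ok(const char name[NAME_LEN])
{
	/* strnlen() == NAME_LEN means no NUL was found inside the buffer */
	return strnlen(name, NAME_LEN) < NAME_LEN;
}

int main(void)
{
	char good[NAME_LEN] = "ftp";          /* NUL-terminated         */
	char bad[NAME_LEN];

	memset(bad, 'A', sizeof(bad));        /* no terminator anywhere */

	printf("good: %s\n", name_ok(good) ? "accepted" : "-ENAMETOOLONG");
	printf("bad:  %s\n", name_ok(bad)  ? "accepted" : "-ENAMETOOLONG");
	return 0;
}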
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -372,8 +372,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
-	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
-		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
+	    add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
+		add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
@@ -407,8 +407,8 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
-	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
-		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
+	    add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
+		add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
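Editor's note: both set_target hunks above clamp the timeout before it is scaled from seconds to milliseconds, because anything above UINT_MAX / MSEC_PER_SEC seconds would wrap a 32-bit result. The sketch below uses a local MAX_TIMEOUT_SEC in that role; the kernel's actual IPSET_MAX_TIMEOUT definition is not reproduced here:

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC    1000u
#define MAX_TIMEOUT_SEC (UINT32_MAX / MSEC_PER_SEC)

static uint32_t timeout_to_msec(uint32_t seconds)
{
	if (seconds > MAX_TIMEOUT_SEC)      /* clamp, as the patch does */
		seconds = MAX_TIMEOUT_SEC;
	return seconds * MSEC_PER_SEC;      /* cannot wrap any more     */
}

int main(void)
{
	uint32_t huge = 10000000u;  /* ~116 days, fine as seconds */

	printf("clamped conversion:   %u ms\n", timeout_to_msec(huge));
	printf("unclamped would wrap: %u ms\n",
	       (uint32_t)(huge * (uint64_t)MSEC_PER_SEC));
	return 0;
}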
@@ -470,7 +470,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
 	}
 	if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
 	     (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
-	    !(par->hook_mask & (1 << NF_INET_FORWARD |
+	    (par->hook_mask & ~(1 << NF_INET_FORWARD |
 			       1 << NF_INET_LOCAL_OUT |
 			       1 << NF_INET_POST_ROUTING))) {
 		pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
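Editor's note: the checkentry fix above changes the predicate from "none of the allowed hooks are used" to "any hook outside the allowed set is used", which is the test the error message actually describes. A self-contained demo (hook bit positions follow the NF_INET_* order, but nothing else is taken from the kernel):

#include <stdio.h>

enum { HOOK_PRE_ROUTING, HOOK_LOCAL_IN, HOOK_FORWARD,
       HOOK_LOCAL_OUT, HOOK_POST_ROUTING };

int main(void)
{
	unsigned int allowed = 1u << HOOK_FORWARD |
			       1u << HOOK_LOCAL_OUT |
			       1u << HOOK_POST_ROUTING;
	/* Rule used in both PREROUTING (not allowed) and FORWARD (allowed). */
	unsigned int hook_mask = 1u << HOOK_PRE_ROUTING | 1u << HOOK_FORWARD;

	int old_check = !(hook_mask & allowed);   /* 0: bug, rule slips through   */
	int new_check = hook_mask & ~allowed;     /* non-zero: correctly rejected */

	printf("old check rejects: %s\n", old_check ? "yes" : "no");
	printf("new check rejects: %s\n", new_check ? "yes" : "no");
	return 0;
}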