Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix crash in ipvs tot_stats estimator, from Julian Anastasov.

 2) Fix OOPS in nf_nat on netns removal, from Florian Westphal.

 3) Really really really fix locking issues in slip and slcan tty write
    wakeups, from Tyler Hall.

 4) Fix checksum offloading in fec driver, from Fugang Duan.

 5) Off by one in BPF instruction limit test, from Kees Cook.

 6) Need to clear all TSO capability flags when doing software TSO in
    tg3 driver, from Prashant Sreedharan.

 7) Fix memory leak in vlan_reorder_header() error path, from Li
    RongQing.

 8) Fix various bugs in xen-netfront and xen-netback multiqueue support,
    from David Vrabel and Wei Liu.

 9) Fix deadlock in cxgb4 driver, from Li RongQing.

10) Prevent double free of no-cache DST entries, from Eric Dumazet.

11) Bad csum_start handling in skb_segment() leads to crashes when
    forwarding, from Tom Herbert.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
  net: fix setting csum_start in skb_segment()
  ipv4: fix dst race in sk_dst_get()
  net: filter: Use kcalloc/kmalloc_array to allocate arrays
  trivial: net: filter: Change kerneldoc parameter order
  trivial: net: filter: Fix typo in comment
  net: allwinner: emac: Add missing free_irq
  cxgb4: use dev_port to identify ports
  xen-netback: bookkeep number of active queues in our own module
  tg3: Change nvram command timeout value to 50ms
  cxgb4: Not need to hold the adap_rcu_lock lock when read adap_rcu_list
  be2net: fix qnq mode detection on VFs
  of: mdio: fixup of_phy_register_fixed_link parsing of new bindings
  at86rf230: fix irq setup
  net: phy: at803x: fix coccinelle warnings
  net/mlx4_core: Fix the error flow when probing with invalid VF configuration
  tulip: Poll link status more frequently for Comet chips
  net: huawei_cdc_ncm: increase command buffer size
  drivers: net: cpsw: fix dual EMAC stall when connected to same switch
  xen-netfront: recreate queues correctly when reconnecting
  xen-netfront: fix oops when disconnected from backend
  ...
Committed by Linus Torvalds on 2014-06-25 21:08:24 -07:00
68 changed files with 749 additions and 374 deletions

net/8021q/vlan_core.c

@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-	if (skb_cow(skb, skb_headroom(skb)) < 0)
+	if (skb_cow(skb, skb_headroom(skb)) < 0) {
+		kfree_skb(skb);
 		return NULL;
+	}
 	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
 	skb->mac_header += VLAN_HLEN;
 	return skb;

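The leak fixed above is an ownership bug: vlan_reorder_header() consumes the skb it is handed, so when skb_cow() fails it must free the buffer itself, because the caller only sees the NULL return and has nothing left to release. A self-contained sketch of that rule outside the kernel (hypothetical struct buf and transform(), not kernel API):

#include <stdlib.h>

struct buf { char *data; size_t len; };

/* A transform that takes ownership of its argument: every failure
 * path must free the buffer, since the caller only ever sees NULL. */
static struct buf *transform(struct buf *b)
{
	char *bigger = realloc(b->data, b->len + 4);
	if (!bigger) {
		free(b->data);	/* analogous to the added kfree_skb(skb) */
		free(b);
		return NULL;
	}
	b->data = bigger;
	b->len += 4;
	return b;
}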
net/bluetooth/hci_conn.c

@@ -610,11 +610,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
 	if (hci_update_random_address(req, false, &own_addr_type))
 		return;
-
-	/* Save the address type used for this connnection attempt so we able
-	 * to retrieve this information if we need it.
-	 */
-	conn->src_type = own_addr_type;
 
 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
 	bacpy(&cp.peer_addr, &conn->dst);
@@ -894,7 +889,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	/* If we're already encrypted set the REAUTH_PEND flag,
 	 * otherwise set the ENCRYPT_PEND.
 	 */
-	if (conn->key_type != 0xff)
+	if (conn->link_mode & HCI_LM_ENCRYPT)
 		set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
 	else
 		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

net/bluetooth/hci_event.c

@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
+	hci_dev_lock(hdev);
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	hci_dev_unlock(hdev);
+
 	hci_conn_check_pending(hdev);
 }
@@ -3537,7 +3541,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		cp.authentication = conn->auth_type;
 
 		/* Request MITM protection if our IO caps allow it
-		 * except for the no-bonding case
+		 * except for the no-bonding case.
+		 * conn->auth_type is not updated here since
+		 * that might cause the user confirmation to be
+		 * rejected in case the remote doesn't have the
+		 * IO capabilities for MITM.
 		 */
 		if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
 		    cp.authentication != HCI_AT_NO_BONDING)
@@ -3628,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 		/* If we're not the initiators request authorization to
 		 * proceed from user space (mgmt_user_confirm with
-		 * confirm_hint set to 1). */
-		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+		 * confirm_hint set to 1). The exception is if neither
+		 * side had MITM in which case we do auto-accept.
+		 */
+		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+		    (loc_mitm || rem_mitm)) {
 			BT_DBG("Confirming auto-accept as acceptor");
 			confirm_hint = 1;
 			goto confirm;

net/bluetooth/l2cap_core.c

@@ -1663,7 +1663,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 	kfree_skb(conn->rx_skb);
 
 	skb_queue_purge(&conn->pending_rx);
-	flush_work(&conn->pending_rx_work);
+
+	/* We can not call flush_work(&conn->pending_rx_work) here since we
+	 * might block if we are running on a worker from the same workqueue
+	 * pending_rx_work is waiting on.
+	 */
+	if (work_pending(&conn->pending_rx_work))
+		cancel_work_sync(&conn->pending_rx_work);
 
 	l2cap_unregister_all_users(conn);

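The new comment describes a classic self-deadlock: flush_work() waits for the item to finish, but if the teardown path is itself running on the only worker that could execute pending_rx_work, that wait can never end. A minimal sketch of the shape of the hazard and the workaround (hypothetical rx_work/cleanup names, not the l2cap code):

#include <linux/workqueue.h>

static struct work_struct rx_work;

/* If cleanup() can itself run from a work item on the same workqueue,
 * flush_work(&rx_work) may wait on a worker that is busy running us.
 * An item that is still only *pending* (queued, not started) can be
 * pulled back with cancel_work_sync() without waiting on execution. */
static void cleanup(void)
{
	if (work_pending(&rx_work))
		cancel_work_sync(&rx_work);
}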
net/bluetooth/l2cap_sock.c

@@ -787,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 		/*change security for LE channels */
 		if (chan->scid == L2CAP_CID_ATT) {
-			if (!conn->hcon->out) {
-				err = -EINVAL;
-				break;
-			}
-
 			if (smp_conn_security(conn->hcon, sec.level))
 				break;
 			sk->sk_state = BT_CONFIG;

net/bluetooth/mgmt.c

@@ -1047,6 +1047,43 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
 	}
 }
 
+static void hci_stop_discovery(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_remote_name_req_cancel cp;
+	struct inquiry_entry *e;
+
+	switch (hdev->discovery.state) {
+	case DISCOVERY_FINDING:
+		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+		} else {
+			cancel_delayed_work(&hdev->le_scan_disable);
+			hci_req_add_le_scan_disable(req);
+		}
+
+		break;
+
+	case DISCOVERY_RESOLVING:
+		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+						     NAME_PENDING);
+		if (!e)
+			return;
+
+		bacpy(&cp.bdaddr, &e->data.bdaddr);
+		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+			    &cp);
+
+		break;
+
+	default:
+		/* Passive scanning */
+		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+			hci_req_add_le_scan_disable(req);
+		break;
+	}
+}
+
 static int clean_up_hci_state(struct hci_dev *hdev)
 {
 	struct hci_request req;
@@ -1063,9 +1100,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 		disable_advertising(&req);
 
-	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
-		hci_req_add_le_scan_disable(&req);
-	}
+	hci_stop_discovery(&req);
 
 	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
 		struct hci_cp_disconnect dc;
@@ -2996,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
 	}
 
 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-		/* Continue with pairing via SMP */
+		/* Continue with pairing via SMP. The hdev lock must be
+		 * released as SMP may try to reacquire it for crypto
+		 * purposes.
+		 */
+		hci_dev_unlock(hdev);
 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+		hci_dev_lock(hdev);
 
 		if (!err)
 			err = cmd_complete(sk, hdev->id, mgmt_op,
@@ -3574,8 +3614,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 {
 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
 	struct pending_cmd *cmd;
-	struct hci_cp_remote_name_req_cancel cp;
-	struct inquiry_entry *e;
 	struct hci_request req;
 	int err;
@@ -3605,52 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 	hci_req_init(&req, hdev);
 
-	switch (hdev->discovery.state) {
-	case DISCOVERY_FINDING:
-		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
-			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-		} else {
-			cancel_delayed_work(&hdev->le_scan_disable);
-			hci_req_add_le_scan_disable(&req);
-		}
-
-		break;
-
-	case DISCOVERY_RESOLVING:
-		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-						     NAME_PENDING);
-		if (!e) {
-			mgmt_pending_remove(cmd);
-			err = cmd_complete(sk, hdev->id,
-					   MGMT_OP_STOP_DISCOVERY, 0,
-					   &mgmt_cp->type,
-					   sizeof(mgmt_cp->type));
-			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-			goto unlock;
-		}
-
-		bacpy(&cp.bdaddr, &e->data.bdaddr);
-		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
-			    &cp);
-
-		break;
-
-	default:
-		BT_DBG("unknown discovery state %u", hdev->discovery.state);
-
-		mgmt_pending_remove(cmd);
-		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
-				   MGMT_STATUS_FAILED, &mgmt_cp->type,
-				   sizeof(mgmt_cp->type));
-		goto unlock;
-	}
+	hci_stop_discovery(&req);
 
-	err = hci_req_run(&req, stop_discovery_complete);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
-	else
-		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+	err = hci_req_run(&req, stop_discovery_complete);
+	if (!err) {
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+		goto unlock;
+	}
+
+	mgmt_pending_remove(cmd);
+
+	/* If no HCI commands were sent we're done */
+	if (err == -ENODATA) {
+		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+				   &mgmt_cp->type, sizeof(mgmt_cp->type));
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	}
 
 unlock:
 	hci_dev_unlock(hdev);

net/bluetooth/smp.c

@@ -544,7 +544,7 @@ static u8 smp_random(struct smp_chan *smp)
 		hci_le_start_enc(hcon, ediv, rand, stk);
 		hcon->enc_key_size = smp->enc_key_size;
 	} else {
-		u8 stk[16];
+		u8 stk[16], auth;
 		__le64 rand = 0;
 		__le16 ediv = 0;
@@ -556,8 +556,13 @@ static u8 smp_random(struct smp_chan *smp)
 		memset(stk + smp->enc_key_size, 0,
 		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
+		if (hcon->pending_sec_level == BT_SECURITY_HIGH)
+			auth = 1;
+		else
+			auth = 0;
+
 		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-			    HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size,
+			    HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
 			    ediv, rand);
 	}

net/core/dst.c

@@ -269,6 +269,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+	dst = dst_destroy(dst);
+	if (dst)
+		__dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
 	if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
 		WARN_ON(newrefcnt < 0);
-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-			dst = dst_destroy(dst);
-			if (dst)
-				__dst_free(dst);
-		}
+		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+			call_rcu(&dst->rcu_head, dst_destroy_rcu);
 	}
 }
 EXPORT_SYMBOL(dst_release);

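This is bullet 10 above: the final put of a DST_NOCACHE entry no longer destroys it synchronously but defers the destruction by one RCU grace period. The reason shows up in the ip_tunnel.c hunk below, where a reader grabs its reference with atomic_inc_not_zero() under rcu_read_lock(); deferring the free guarantees such a reader never touches freed memory and simply observes a zero refcount when it loses the race. A generic sketch of the write side of that pattern (hypothetical struct obj, not the dst code itself):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	atomic_t refcnt;
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

/* The last put schedules the free after a grace period, so readers
 * that found the object under rcu_read_lock() can still inspect it. */
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcnt))
		call_rcu(&o->rcu, obj_free_rcu);
}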
net/core/filter.c

@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
 	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
-	if (len <= 0 || len >= BPF_MAXINSNS)
+	if (len <= 0 || len > BPF_MAXINSNS)
 		return -EINVAL;
 
 	if (new_prog) {
-		addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+		addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
 		if (!addrs)
 			return -ENOMEM;
 	}
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
 	BUILD_BUG_ON(BPF_MEMWORDS > 16);
 
-	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		return -ENOMEM;
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
 	fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (fp_new) {
 		*fp_new = *fp;
-		/* As we're kepping orig_prog in fp_new along,
+		/* As we're keeping orig_prog in fp_new along,
 		 * we need to make sure we're not evicting it
 		 * from the old fp.
 		 */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 /**
  *	sk_unattached_filter_create - create an unattached filter
- *	@fprog: the filter program
  *	@pfp: the unattached filter that is created
+ *	@fprog: the filter program
  *
  *	Create a filter independent of any socket. We first run some
  *	sanity checks on it to make sure it does not explode on us later.

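The kcalloc()/kmalloc_array() conversions above are about integer overflow, not style: a large caller-influenced count multiplied by the element size can wrap, making plain kmalloc() return a buffer smaller than the code then indexes. The array helpers reject overflowing requests up front; roughly (a simplified sketch of the check, kcalloc() being the same thing plus zeroing):

/* Simplified: fail instead of wrapping when n * size would overflow. */
static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}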
net/core/skbuff.c

@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 							    skb_put(nskb, len),
 							    len, 0);
 			SKB_GSO_CB(nskb)->csum_start =
-				skb_headroom(nskb) + offset;
+				skb_headroom(nskb) + doffset;
 			continue;
 		}

net/ipv4/ip_tunnel.c

@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
 {
 	struct dst_entry *old_dst;
 
-	if (dst) {
-		if (dst->flags & DST_NOCACHE)
-			dst = NULL;
-		else
-			dst_clone(dst);
-	}
+	dst_clone(dst);
 	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
 	dst_release(old_dst);
 }
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 	rcu_read_lock();
 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
 	if (dst) {
 		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-			rcu_read_unlock();
 			tunnel_dst_reset(t);
-			return NULL;
+			dst_release(dst);
+			dst = NULL;
 		}
-		dst_hold(dst);
 	}
 	rcu_read_unlock();
 	return (struct rtable *)dst;

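This hunk is the read side of the pattern sketched after the dst.c change: the per-cpu cache may hold an entry whose last reference is concurrently being dropped, so the lookup takes its reference with atomic_inc_not_zero() and treats a failed bump as a cache miss. Roughly, reusing the hypothetical struct obj from that sketch:

static struct obj *cache_get(struct obj __rcu **slot)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(*slot);
	/* A failed bump means the final put already ran; the memory is
	 * only safe to touch here because the free side defers via
	 * call_rcu(). */
	if (o && !atomic_inc_not_zero(&o->refcnt))
		o = NULL;
	rcu_read_unlock();
	return o;
}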
net/ipv4/tcp_fastopen.c

@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 				      struct sk_buff *skb,
 				      struct dst_entry *dst,
 				      struct request_sock *req)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;

net/ipv4/tcp_input.c

@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 			unsigned int new_len = (pkt_len / mss) * mss;
 			if (!in_sack && new_len < pkt_len) {
 				new_len += mss;
-				if (new_len > skb->len)
+				if (new_len >= skb->len)
 					return 0;
 			}
 			pkt_len = new_len;

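To see the boundary the new >= closes: with mss = 1000 and a 3000-byte skb, a SACK edge at offset 2500 rounds down to new_len = 2000 and back up to 3000. The old test (new_len > skb->len) accepted that value and went on to split the skb at its own length, producing an empty fragment; returning 0 instead treats the skb as not matching and leaves it whole.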
net/netfilter/ipvs/ip_vs_ctl.c

@@ -3778,6 +3778,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 	cancel_delayed_work_sync(&ipvs->defense_work);
 	cancel_work_sync(&ipvs->defense_work.work);
 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
+	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 }
 
 #else
@@ -3840,7 +3841,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	ip_vs_trash_cleanup(net);
-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 	ip_vs_control_net_cleanup_sysctl(net);
 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
 	remove_proc_entry("ip_vs_stats", net->proc_net);

net/netfilter/nf_conntrack_netlink.c

@@ -596,6 +596,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 #endif
 #ifdef CONFIG_NF_CONNTRACK_MARK
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+#endif
 	       + ctnetlink_proto_size(ct)
 	       + ctnetlink_label_size(ct)
@@ -1150,7 +1153,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 static int
 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-	struct nf_conn *ct, *last = NULL;
+	struct nf_conn *ct, *last;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1166,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 	if (cb->args[2])
 		return 0;
 
-	if (cb->args[0] == nr_cpu_ids)
-		return 0;
+	last = (struct nf_conn *)cb->args[1];
 
 	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
 		struct ct_pcpu *pcpu;
@@ -1174,7 +1176,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
 		spin_lock_bh(&pcpu->lock);
-		last = (struct nf_conn *)cb->args[1];
 		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
 restart:
 		hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1194,9 @@ restart:
 						  ct);
 			rcu_read_unlock();
 			if (res < 0) {
-				nf_conntrack_get(&ct->ct_general);
+				if (!atomic_inc_not_zero(&ct->ct_general.use))
+					continue;
 				cb->args[0] = cpu;
 				cb->args[1] = (unsigned long)ct;
 				spin_unlock_bh(&pcpu->lock);
 				goto out;
@@ -1202,10 +1205,10 @@ restart:
 		if (cb->args[1]) {
 			cb->args[1] = 0;
 			goto restart;
-		} else
-			cb->args[2] = 1;
+		}
 		spin_unlock_bh(&pcpu->lock);
 	}
+	cb->args[2] = 1;
 out:
 	if (last)
 		nf_ct_put(last);
@@ -2039,6 +2042,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
 #endif
 #ifdef CONFIG_NF_CONNTRACK_MARK
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+#endif
 	       + ctnetlink_proto_size(ct)
 	       ;

net/netfilter/nf_nat_core.c

@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 	return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+	struct nf_conn_nat *nat = nfct_nat(ct);
+
+	if (nf_nat_proto_remove(ct, data))
+		return 1;
+
+	if (!nat || !nat->ct)
+		return 0;
+
+	/* This netns is being destroyed, and conntrack has nat null binding.
+	 * Remove it from bysource hash, as the table will be freed soon.
+	 *
+	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
+	 * will delete entry from already-freed table.
+	 */
+	if (!del_timer(&ct->timeout))
+		return 1;
+
+	spin_lock_bh(&nf_nat_lock);
+	hlist_del_rcu(&nat->bysource);
+	ct->status &= ~IPS_NAT_DONE_MASK;
+	nat->ct = NULL;
+	spin_unlock_bh(&nf_nat_lock);
+
+	add_timer(&ct->timeout);
+
+	/* don't delete conntrack.  Although that would make things a lot
+	 * simpler, we'd end up flushing all conntracks on nat rmmod.
+	 */
+	return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
 	struct nf_nat_proto_clean clean = {
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};
 
-	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
+	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }

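The del_timer() dance in the new nf_nat_proto_clean() deserves a note: a conntrack entry is reaped by its timeout timer, so del_timer()'s return value doubles as an ownership test. Only the caller that actually deactivated a pending timer may unhash the entry; losing the race means destruction is already in flight and the entry must be left alone. A sketch of the idiom (hypothetical struct entry and table_lock):

static int detach_entry(struct entry *e)
{
	/* Nonzero only for the caller that deactivated a pending timer;
	 * losing the race means the expiry path owns the entry now. */
	if (!del_timer(&e->timeout))
		return 1;

	spin_lock_bh(&table_lock);
	hlist_del_rcu(&e->hash_node);	/* unhash while expiry is held off */
	spin_unlock_bh(&table_lock);

	add_timer(&e->timeout);		/* normal expiry resumes */
	return 0;
}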
net/netfilter/nf_tables_api.c

@@ -1730,6 +1730,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 		if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
 			return -EINVAL;
 		handle = nf_tables_alloc_handle(table);
+
+		if (chain->use == UINT_MAX)
+			return -EOVERFLOW;
 	}
 
 	if (nla[NFTA_RULE_POSITION]) {
@@ -1789,14 +1792,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
 		if (nft_rule_is_active_next(net, old_rule)) {
-			trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
 						   old_rule);
 			if (trans == NULL) {
 				err = -ENOMEM;
 				goto err2;
 			}
 			nft_rule_disactivate_next(net, old_rule);
-			list_add_tail(&rule->list, &old_rule->list);
+			chain->use--;
+			list_add_tail_rcu(&rule->list, &old_rule->list);
 		} else {
 			err = -ENOENT;
 			goto err2;
@@ -1826,6 +1830,7 @@ err3:
 		list_del_rcu(&nft_trans_rule(trans)->list);
 		nft_rule_clear(net, nft_trans_rule(trans));
 		nft_trans_destroy(trans);
+		chain->use++;
 	}
 err2:
 	nf_tables_rule_destroy(&ctx, rule);
@@ -2845,7 +2850,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 		goto nla_put_failure;
 
 	nfmsg = nlmsg_data(nlh);
-	nfmsg->nfgen_family = NFPROTO_UNSPEC;
+	nfmsg->nfgen_family = ctx.afi->family;
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = 0;

net/netfilter/nft_compat.c

@@ -195,6 +195,15 @@ static void
 nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
 	struct xt_target *target = expr->ops->data;
+	void *info = nft_expr_priv(expr);
+	struct xt_tgdtor_param par;
+
+	par.net = ctx->net;
+	par.target = target;
+	par.targinfo = info;
+	par.family = ctx->afi->family;
+	if (par.target->destroy != NULL)
+		par.target->destroy(&par);
 
 	module_put(target->me);
 }
@@ -382,6 +391,15 @@ static void
 nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
 	struct xt_match *match = expr->ops->data;
+	void *info = nft_expr_priv(expr);
+	struct xt_mtdtor_param par;
+
+	par.net = ctx->net;
+	par.match = match;
+	par.matchinfo = info;
+	par.family = ctx->afi->family;
+	if (par.match->destroy != NULL)
+		par.match->destroy(&par);
 
 	module_put(match->me);
 }

net/netfilter/nft_nat.c

@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	if (nla_put_be32(skb,
 			 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
 		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
-		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
-		goto nla_put_failure;
+	if (priv->sreg_proto_min) {
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
+				 htonl(priv->sreg_proto_min)))
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
+				 htonl(priv->sreg_proto_max)))
+			goto nla_put_failure;
+	}
 	return 0;
 
 nla_put_failure:

net/sctp/sysctl.c

@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
 			       loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	char tmp[8];
 	struct ctl_table tbl;
-	int ret;
-	int changed = 0;
+	bool changed = false;
 	char *none = "none";
+	char tmp[8];
+	int ret;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 
 	if (write) {
 		tbl.data = tmp;
-		tbl.maxlen = 8;
+		tbl.maxlen = sizeof(tmp);
 	} else {
 		tbl.data = net->sctp.sctp_hmac_alg ? : none;
 		tbl.maxlen = strlen(tbl.data);
 	}
 
-	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
-
-	if (write) {
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+	if (write && ret == 0) {
 #ifdef CONFIG_CRYPTO_MD5
 		if (!strncmp(tmp, "md5", 3)) {
 			net->sctp.sctp_hmac_alg = "md5";
-			changed = 1;
+			changed = true;
 		}
 #endif
 #ifdef CONFIG_CRYPTO_SHA1
 		if (!strncmp(tmp, "sha1", 4)) {
 			net->sctp.sctp_hmac_alg = "sha1";
-			changed = 1;
+			changed = true;
 		}
 #endif
 		if (!strncmp(tmp, "none", 4)) {
 			net->sctp.sctp_hmac_alg = NULL;
-			changed = 1;
+			changed = true;
 		}
 
 		if (!changed)
 			ret = -EINVAL;
 	}
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 				loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 	if (write)
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_min;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_min = new_value;
 	}
+
 	return ret;
 }
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 				loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 	if (write)
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_max;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_max = new_value;
 	}
+
 	return ret;
 }
@@ -444,8 +447,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 	tbl.data = &net->sctp.auth_enable;
 
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-
-	if (write) {
+	if (write && ret == 0) {
 		struct sock *sk = net->sctp.ctl_sock;
 
 		net->sctp.auth_enable = new_value;