Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

net/bridge/br_netlink.c
@@ -18,6 +18,7 @@
 #include <net/sock.h>

 #include "br_private.h"
+#include "br_private_stp.h"

 static inline size_t br_nlmsg_size(void)
 {
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)

         p->state = new_state;
         br_log_state(p);
+
+        spin_lock_bh(&p->br->lock);
+        br_port_state_selection(p->br);
+        spin_unlock_bh(&p->br->lock);
+
         br_ifinfo_notify(RTM_NEWLINK, p);

         return 0;

net/bridge/br_stp.c
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
         struct net_bridge_port *p;
         unsigned int liveports = 0;

-        /* Don't change port states if userspace is handling STP */
-        if (br->stp_enabled == BR_USER_STP)
-                return;
-
         list_for_each_entry(p, &br->port_list, list) {
                 if (p->state == BR_STATE_DISABLED)
                         continue;

-                if (p->port_no == br->root_port) {
-                        p->config_pending = 0;
-                        p->topology_change_ack = 0;
-                        br_make_forwarding(p);
-                } else if (br_is_designated_port(p)) {
-                        del_timer(&p->message_age_timer);
-                        br_make_forwarding(p);
-                } else {
-                        p->config_pending = 0;
-                        p->topology_change_ack = 0;
-                        br_make_blocking(p);
+                /* Don't change port states if userspace is handling STP */
+                if (br->stp_enabled != BR_USER_STP) {
+                        if (p->port_no == br->root_port) {
+                                p->config_pending = 0;
+                                p->topology_change_ack = 0;
+                                br_make_forwarding(p);
+                        } else if (br_is_designated_port(p)) {
+                                del_timer(&p->message_age_timer);
+                                br_make_forwarding(p);
+                        } else {
+                                p->config_pending = 0;
+                                p->topology_change_ack = 0;
+                                br_make_blocking(p);
+                        }
                 }

                 if (p->state == BR_STATE_FORWARDING)
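
Reviewer note: the two bridge hunks work together. br_rtm_setlink() now re-runs br_port_state_selection() under br->lock after userspace changes a port state, and br_port_state_selection() no longer bails out early in user-STP mode, so liveports keeps being counted and the master device's carrier can recover. A minimal sketch of the locking rule for any out-of-band state change (hypothetical helper, not part of this commit):

/*
 * Sketch: callers outside the STP core that change a port's state
 * must re-run port state selection with the bridge lock held, since
 * the STP machinery assumes it is the only writer.
 */
static void example_force_port_state(struct net_bridge_port *p, u8 state)
{
        p->state = state;
        br_log_state(p);

        spin_lock_bh(&p->br->lock);
        br_port_state_selection(p->br);
        spin_unlock_bh(&p->br->lock);
}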

net/ceph/osd_client.c
@@ -244,7 +244,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                 ceph_pagelist_init(req->r_trail);
         }
         /* create request message; allow space for oid */
-        msg_size += 40;
+        msg_size += MAX_OBJ_NAME_SIZE;
         if (snapc)
                 msg_size += sizeof(u64) * snapc->num_snaps;
         if (use_mempool)

net/core/dev.c
@@ -1387,7 +1387,7 @@ rollback:
         for_each_net(net) {
                 for_each_netdev(net, dev) {
                         if (dev == last)
-                                break;
+                                goto outroll;

                         if (dev->flags & IFF_UP) {
                                 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1398,6 +1398,7 @@ rollback:
                 }
         }

+outroll:
         raw_notifier_chain_unregister(&netdev_chain, nb);
         goto unlock;
 }
@@ -4209,6 +4210,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
                             sizeof(struct dev_iter_state));
 }

+int dev_seq_open_ops(struct inode *inode, struct file *file,
+                     const struct seq_operations *ops)
+{
+        return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
 static const struct file_operations dev_seq_fops = {
         .owner   = THIS_MODULE,
         .open    = dev_seq_open,

net/core/dev_addr_lists.c
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {

 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-        return seq_open_net(inode, file, &dev_mc_seq_ops,
-                            sizeof(struct seq_net_private));
+        return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
 }

 static const struct file_operations dev_mc_seq_fops = {

net/decnet/dn_timer.c
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);

 void dn_start_slow_timer(struct sock *sk)
 {
-        sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-        sk->sk_timer.function = dn_slow_timer;
-        sk->sk_timer.data = (unsigned long)sk;
-
-        add_timer(&sk->sk_timer);
+        setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+        sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }

 void dn_stop_slow_timer(struct sock *sk)
 {
-        del_timer(&sk->sk_timer);
+        sk_stop_timer(sk, &sk->sk_timer);
 }

 static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
         struct sock *sk = (struct sock *)arg;
         struct dn_scp *scp = DN_SK(sk);

-        sock_hold(sk);
         bh_lock_sock(sk);

         if (sock_owned_by_user(sk)) {
-                sk->sk_timer.expires = jiffies + HZ / 10;
-                add_timer(&sk->sk_timer);
+                sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
                 goto out;
         }

@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
                 scp->keepalive_fxn(sk);
         }

-        sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
-        add_timer(&sk->sk_timer);
+        sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 out:
         bh_unlock_sock(sk);
         sock_put(sk);
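
Reviewer note: the decnet conversion swaps open-coded timer arming for the sock timer helpers, which manage the socket refcount for you: sk_reset_timer() takes a reference when it arms the timer and sk_stop_timer() drops it, so dn_slow_timer() can no longer fire against a freed socket (which is also why the explicit sock_hold() disappears). A sketch of the pattern on the pre-4.15 timer API assumed throughout this diff:

/*
 * Sketch: refcount-safe socket timer.  sk_reset_timer() holds a
 * reference when it (re)arms; the handler must drop it with
 * sock_put() on every exit path.
 */
static void example_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* owner is busy: retry soon instead of racing it */
                sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
                goto out;
        }

        /* ... periodic work ... */
        sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
out:
        bh_unlock_sock(sk);
        sock_put(sk);        /* pairs with the hold from sk_reset_timer() */
}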

net/ipv4/devinet.c
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                             void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
+        int old_value = *(int *)ctl->data;
         int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+        int new_value = *(int *)ctl->data;

         if (write) {
                 struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,

                 if (cnf == net->ipv4.devconf_dflt)
                         devinet_copy_dflt_conf(net, i);
+                if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+                        if ((new_value == 0) && (old_value != 0))
+                                rt_cache_flush(net, 0);
         }

         return ret;
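
Reviewer note: snapshotting ctl->data on both sides of proc_dointvec() is the usual way to detect a value transition inside a sysctl handler; here a nonzero-to-zero flip of ACCEPT_LOCAL triggers a route cache flush. The detection idiom in isolation (hypothetical handler, not from this commit):

/* Sketch: react only to an actual N -> 0 transition on write. */
static int example_conf_proc(struct ctl_table *ctl, int write,
                             void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int old_value = *(int *)ctl->data;
        int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
        int new_value = *(int *)ctl->data;

        if (write && ret == 0 && old_value != 0 && new_value == 0) {
                /* value was just cleared: invalidate dependent state */
        }
        return ret;
}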

net/ipv4/netfilter.c
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
         /* Change in oif may mean change in hh_len. */
         hh_len = skb_dst(skb)->dev->hard_header_len;
         if (skb_headroom(skb) < hh_len &&
-            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+            pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+                             0, GFP_ATOMIC))
                 return -1;

         return 0;

net/ipv4/route.c
@@ -130,6 +130,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly = 256;
 static int rt_chain_length_max __read_mostly = 20;
+static int redirect_genid;

 /*
  *      Interface to generic destination cache.
@@ -415,9 +416,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
         else {
                 struct rtable *r = v;
                 struct neighbour *n;
-                int len;
+                int len, HHUptod;
+
+                rcu_read_lock();
+                n = dst_get_neighbour(&r->dst);
+                HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+                rcu_read_unlock();

-                n = dst_get_neighbour(&r->dst);
                 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                            "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
                            r->dst.dev ? r->dst.dev->name : "*",
@@ -431,7 +436,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                               dst_metric(&r->dst, RTAX_RTTVAR)),
                            r->rt_key_tos,
                            -1,
-                           (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+                           HHUptod,
                            r->rt_spec_dst, &len);

                 seq_printf(seq, "%*s\n", 127 - len, "");
@@ -836,6 +841,7 @@ static void rt_cache_invalidate(struct net *net)

         get_random_bytes(&shuffle, sizeof(shuffle));
         atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+        redirect_genid++;
 }

 /*
@@ -1385,8 +1391,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,

         peer = rt->peer;
         if (peer) {
-                if (peer->redirect_learned.a4 != new_gw) {
+                if (peer->redirect_learned.a4 != new_gw ||
+                    peer->redirect_genid != redirect_genid) {
                         peer->redirect_learned.a4 = new_gw;
+                        peer->redirect_genid = redirect_genid;
                         atomic_inc(&__rt_peer_genid);
                 }
                 check_peer_redir(&rt->dst, peer);
@@ -1679,12 +1687,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 }


-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static struct rtable *ipv4_validate_peer(struct rtable *rt)
 {
-        struct rtable *rt = (struct rtable *) dst;
-
-        if (rt_is_expired(rt))
-                return NULL;
         if (rt->rt_peer_genid != rt_peer_genid()) {
                 struct inet_peer *peer;

@@ -1693,17 +1697,29 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)

                 peer = rt->peer;
                 if (peer) {
-                        check_peer_pmtu(dst, peer);
+                        check_peer_pmtu(&rt->dst, peer);

+                        if (peer->redirect_genid != redirect_genid)
+                                peer->redirect_learned.a4 = 0;
                         if (peer->redirect_learned.a4 &&
                             peer->redirect_learned.a4 != rt->rt_gateway) {
-                                if (check_peer_redir(dst, peer))
+                                if (check_peer_redir(&rt->dst, peer))
                                         return NULL;
                         }
                 }

                 rt->rt_peer_genid = rt_peer_genid();
         }
+        return rt;
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+        struct rtable *rt = (struct rtable *) dst;
+
+        if (rt_is_expired(rt))
+                return NULL;
+        dst = (struct dst_entry *) ipv4_validate_peer(rt);
         return dst;
 }

@@ -1851,6 +1867,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
                         dst_init_metrics(&rt->dst, peer->metrics, false);

                 check_peer_pmtu(&rt->dst, peer);
+                if (peer->redirect_genid != redirect_genid)
+                        peer->redirect_learned.a4 = 0;
                 if (peer->redirect_learned.a4 &&
                     peer->redirect_learned.a4 != rt->rt_gateway) {
                         rt->rt_gateway = peer->redirect_learned.a4;
@@ -2356,6 +2374,9 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                     rth->rt_mark == skb->mark &&
                     net_eq(dev_net(rth->dst.dev), net) &&
                     !rt_is_expired(rth)) {
+                        rth = ipv4_validate_peer(rth);
+                        if (!rth)
+                                continue;
                         if (noref) {
                                 dst_use_noref(&rth->dst, jiffies);
                                 skb_dst_set_noref(skb, &rth->dst);
@@ -2731,6 +2752,9 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
                     (IPTOS_RT_MASK | RTO_ONLINK)) &&
                     net_eq(dev_net(rth->dst.dev), net) &&
                     !rt_is_expired(rth)) {
+                        rth = ipv4_validate_peer(rth);
+                        if (!rth)
+                                continue;
                         dst_use(&rth->dst, jiffies);
                         RT_CACHE_STAT_INC(out_hit);
                         rcu_read_unlock_bh();
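
Reviewer note: redirect_genid is a classic generation counter. rt_cache_invalidate() bumps it once, and every cached redirect goes stale lazily: readers compare the peer's stamped generation against the global one before trusting redirect_learned.a4, and ipv4_validate_peer() clears stale entries on the next lookup. The idiom distilled into standalone C (all names illustrative only):

/* Generation-count invalidation: O(1) writer, lazy readers. */
static unsigned int cache_genid;

struct cached_gw {
        unsigned int genid;     /* generation this entry was stamped with */
        unsigned int gw;        /* stands in for redirect_learned.a4 */
};

static void cached_gw_set(struct cached_gw *c, unsigned int gw)
{
        c->gw = gw;
        c->genid = cache_genid;         /* stamp with current generation */
}

static unsigned int cached_gw_get(struct cached_gw *c)
{
        if (c->genid != cache_genid)
                c->gw = 0;              /* stale: discard on next use */
        return c->gw;
}

static void cache_invalidate_all(void)
{
        cache_genid++;                  /* no walk over entries needed */
}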

net/ipv4/udp.c
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         struct inet_sock *inet = inet_sk(sk);
         struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
         struct sk_buff *skb;
-        unsigned int ulen;
+        unsigned int ulen, copied;
         int peeked;
         int err;
         int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1186,10 @@ try_again:
                 goto out;

         ulen = skb->len - sizeof(struct udphdr);
-        if (len > ulen)
-                len = ulen;
-        else if (len < ulen)
+        copied = len;
+        if (copied > ulen)
+                copied = ulen;
+        else if (copied < ulen)
                 msg->msg_flags |= MSG_TRUNC;

         /*
@@ -1197,14 +1198,14 @@ try_again:
          * coverage checksum (UDP-Lite), do it before the copy.
          */

-        if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+        if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                 if (udp_lib_checksum_complete(skb))
                         goto csum_copy_err;
         }

         if (skb_csum_unnecessary(skb))
                 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                              msg->msg_iov, len);
+                                              msg->msg_iov, copied);
         else {
                 err = skb_copy_and_csum_datagram_iovec(skb,
                                                        sizeof(struct udphdr),
@@ -1233,7 +1234,7 @@ try_again:
         if (inet->cmsg_flags)
                 ip_cmsg_recv(msg, skb);

-        err = len;
+        err = copied;
         if (flags & MSG_TRUNC)
                 err = ulen;

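
Reviewer note: the udp_recvmsg() change stops clobbering the caller's len; the working copy lives in copied, so the function can still report the full datagram size (ulen) under MSG_TRUNC while copying only what fits. The bookkeeping as a standalone program:

#include <stdio.h>

/* Sketch of the recvmsg truncation arithmetic. */
int main(void)
{
        unsigned int ulen = 1500;       /* datagram payload size */
        unsigned int len = 512;         /* caller's buffer size */
        unsigned int copied = len;
        int truncated = 0;

        if (copied > ulen)
                copied = ulen;          /* short datagram: take it all */
        else if (copied < ulen)
                truncated = 1;          /* kernel sets MSG_TRUNC here */

        printf("copied %u of %u bytes%s\n", copied, ulen,
               truncated ? " (truncated)" : "");
        return 0;
}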

net/ipv6/ipv6_sockglue.c
@@ -503,7 +503,7 @@ done:
                         goto e_inval;
                 if (val > 255 || val < -1)
                         goto e_inval;
-                np->mcast_hops = val;
+                np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
                 retv = 0;
                 break;


net/ipv6/udp.c
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
         struct ipv6_pinfo *np = inet6_sk(sk);
         struct inet_sock *inet = inet_sk(sk);
         struct sk_buff *skb;
-        unsigned int ulen;
+        unsigned int ulen, copied;
         int peeked;
         int err;
         int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +363,10 @@ try_again:
                 goto out;

         ulen = skb->len - sizeof(struct udphdr);
-        if (len > ulen)
-                len = ulen;
-        else if (len < ulen)
+        copied = len;
+        if (copied > ulen)
+                copied = ulen;
+        else if (copied < ulen)
                 msg->msg_flags |= MSG_TRUNC;

         is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +377,14 @@ try_again:
          * coverage checksum (UDP-Lite), do it before the copy.
          */

-        if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+        if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                 if (udp_lib_checksum_complete(skb))
                         goto csum_copy_err;
         }

         if (skb_csum_unnecessary(skb))
                 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                              msg->msg_iov,len);
+                                              msg->msg_iov, copied );
         else {
                 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
                 if (err == -EINVAL)
@@ -431,7 +432,7 @@ try_again:
                         datagram_recv_ctl(sk, msg, skb);
         }

-        err = len;
+        err = copied;
         if (flags & MSG_TRUNC)
                 err = ulen;


net/l2tp/l2tp_core.c
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len

         /* Get routing info from the tunnel socket */
         skb_dst_drop(skb);
-        skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+        skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));

         inet = inet_sk(sk);
         fl = &inet->cork.fl;
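
Reviewer note: __sk_dst_get() hands back whatever route is cached on the socket even after it has been obsoleted; __sk_dst_check(sk, cookie) validates the entry first and drops it from the socket when it is stale, so the tunnel stops transmitting on dead routes. A sketch of the accessor choice (hypothetical helper, assuming the 3.x dst API):

/* Prefer the checking accessor when the cached route can go stale. */
static struct dst_entry *example_tx_dst(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);

        if (dst == NULL) {
                /* cached route was obsolete: re-route before sending */
        }
        return dst;
}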

net/mac80211/agg-tx.c
@@ -162,6 +162,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                 return -ENOENT;
         }

+        /* if we're already stopping ignore any new requests to stop */
+        if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+                spin_unlock_bh(&sta->lock);
+                return -EALREADY;
+        }
+
         if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                 /* not even started yet! */
                 ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -170,6 +176,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                 return 0;
         }

+        set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
         spin_unlock_bh(&sta->lock);

 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -177,8 +185,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                sta->sta.addr, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */

-        set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
         del_timer_sync(&tid_tx->addba_resp_timer);

         /*
@@ -188,6 +194,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
          */
         clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

+        /*
+         * There might be a few packets being processed right now (on
+         * another CPU) that have already gotten past the aggregation
+         * check when it was still OPERATIONAL and consequently have
+         * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+         * call into the driver at the same time or even before the
+         * TX paths calls into it, which could confuse the driver.
+         *
+         * Wait for all currently running TX paths to finish before
+         * telling the driver. New packets will not go through since
+         * the aggregation session is no longer OPERATIONAL.
+         */
+        synchronize_net();
+
         tid_tx->stop_initiator = initiator;
         tid_tx->tx_stop = tx;

@@ -753,11 +773,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                 goto out;
         }

-        del_timer(&tid_tx->addba_resp_timer);
+        del_timer_sync(&tid_tx->addba_resp_timer);

 #ifdef CONFIG_MAC80211_HT_DEBUG
         printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
 #endif
+
+        /*
+         * addba_resp_timer may have fired before we got here, and
+         * caused WANT_STOP to be set. If the stop then was already
+         * processed further, STOPPING might be set.
+         */
+        if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+            test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+                printk(KERN_DEBUG
+                       "got addBA resp for tid %d but we already gave up\n",
+                       tid);
+#endif
+                goto out;
+        }
+
         /*
          * IEEE 802.11-2007 7.3.1.14:
          * In an ADDBA Response frame, when the Status Code field
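
Reviewer note: the agg-tx changes close two races with one ordering rule: mark the session STOPPING while sta->lock is still held (so concurrent stop requests bounce with -EALREADY), kill the response timer synchronously, and only then re-check whether the timer already tore the session down. Condensed sketch using the structures visible in this diff (not a drop-in function):

static int example_stop_ba_session(struct sta_info *sta,
                                   struct tid_ampdu_tx *tid_tx)
{
        spin_lock_bh(&sta->lock);
        set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);  /* before unlock */
        spin_unlock_bh(&sta->lock);

        del_timer_sync(&tid_tx->addba_resp_timer);       /* not del_timer() */

        if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
                return -EALREADY;       /* the timer gave up on it first */
        return 0;
}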

net/netfilter/Kconfig
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST

 config NF_CONNTRACK_NETBIOS_NS
         tristate "NetBIOS name service protocol support"
-        depends on NETFILTER_ADVANCED
         select NF_CONNTRACK_BROADCAST
         help
           NetBIOS name service requests are sent as broadcast messages from an

net/netfilter/ipset/ip_set_hash_ipport.c
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
         const struct ip_set_hash *h = set->data;
         ipset_adtfn adtfn = set->variant->adt[adt];
         struct hash_ipport4_elem data = { };
-        u32 ip, ip_to, p = 0, port, port_to;
+        u32 ip, ip_to = 0, p = 0, port, port_to;
         u32 timeout = h->timeout;
         bool with_ports = false;
         int ret;

net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
         const struct ip_set_hash *h = set->data;
         ipset_adtfn adtfn = set->variant->adt[adt];
         struct hash_ipportip4_elem data = { };
-        u32 ip, ip_to, p = 0, port, port_to;
+        u32 ip, ip_to = 0, p = 0, port, port_to;
         u32 timeout = h->timeout;
         bool with_ports = false;
         int ret;

net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
         const struct ip_set_hash *h = set->data;
         ipset_adtfn adtfn = set->variant->adt[adt];
         struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
-        u32 ip, ip_to, p = 0, port, port_to;
+        u32 ip, ip_to = 0, p = 0, port, port_to;
         u32 ip2_from = 0, ip2_to, ip2_last, ip2;
         u32 timeout = h->timeout;
         bool with_ports = false;

net/netfilter/nf_conntrack_ecache.c
@@ -27,22 +27,17 @@

 static DEFINE_MUTEX(nf_ct_ecache_mutex);

-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
 {
+        struct net *net = nf_ct_net(ct);
         unsigned long events;
         struct nf_ct_event_notifier *notify;
         struct nf_conntrack_ecache *e;

         rcu_read_lock();
-        notify = rcu_dereference(nf_conntrack_event_cb);
+        notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
         if (notify == NULL)
                 goto out_unlock;

@@ -83,19 +78,20 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);

-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+                                   struct nf_ct_event_notifier *new)
 {
         int ret = 0;
         struct nf_ct_event_notifier *notify;

         mutex_lock(&nf_ct_ecache_mutex);
-        notify = rcu_dereference_protected(nf_conntrack_event_cb,
+        notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                            lockdep_is_held(&nf_ct_ecache_mutex));
         if (notify != NULL) {
                 ret = -EBUSY;
                 goto out_unlock;
         }
-        RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+        RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
         mutex_unlock(&nf_ct_ecache_mutex);
         return ret;

@@ -105,32 +101,34 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+                                      struct nf_ct_event_notifier *new)
 {
         struct nf_ct_event_notifier *notify;

         mutex_lock(&nf_ct_ecache_mutex);
-        notify = rcu_dereference_protected(nf_conntrack_event_cb,
+        notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                            lockdep_is_held(&nf_ct_ecache_mutex));
         BUG_ON(notify != new);
-        RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+        RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
         mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+                                   struct nf_exp_event_notifier *new)
 {
         int ret = 0;
         struct nf_exp_event_notifier *notify;

         mutex_lock(&nf_ct_ecache_mutex);
-        notify = rcu_dereference_protected(nf_expect_event_cb,
+        notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                            lockdep_is_held(&nf_ct_ecache_mutex));
         if (notify != NULL) {
                 ret = -EBUSY;
                 goto out_unlock;
         }
-        RCU_INIT_POINTER(nf_expect_event_cb, new);
+        RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
         mutex_unlock(&nf_ct_ecache_mutex);
         return ret;

@@ -140,15 +138,16 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+                                      struct nf_exp_event_notifier *new)
 {
         struct nf_exp_event_notifier *notify;

         mutex_lock(&nf_ct_ecache_mutex);
-        notify = rcu_dereference_protected(nf_expect_event_cb,
+        notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                            lockdep_is_held(&nf_ct_ecache_mutex));
         BUG_ON(notify != new);
-        RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+        RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
         mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
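
Reviewer note: with the callback pointers moved into net->ct, conntrack event notifiers become per network namespace, and both registration functions grow a struct net argument. How a consumer registers under the new signatures (sketch; example_event_cb is hypothetical):

static struct nf_ct_event_notifier example_notifier = {
        .fcn = example_event_cb,        /* conntrack event handler */
};

static int __net_init example_net_init(struct net *net)
{
        return nf_conntrack_register_notifier(net, &example_notifier);
}

static void __net_exit example_net_exit(struct net *net)
{
        nf_conntrack_unregister_notifier(net, &example_notifier);
}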

net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
  * (C) 2001 by Jay Schulist <jschlst@samba.org>
  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
  * (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * Initial connection tracking via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);

+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+        int ret;
+
+        ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+        if (ret < 0) {
+                pr_err("ctnetlink_init: cannot register notifier.\n");
+                goto err_out;
+        }
+
+        ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+        if (ret < 0) {
+                pr_err("ctnetlink_init: cannot expect register notifier.\n");
+                goto err_unreg_notifier;
+        }
+#endif
+        return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+        nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+        return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+        nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+        nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+        struct net *net;
+
+        list_for_each_entry(net, net_exit_list, exit_list)
+                ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+        .init           = ctnetlink_net_init,
+        .exit_batch     = ctnetlink_net_exit_batch,
+};
+
 static int __init ctnetlink_init(void)
 {
         int ret;
@@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void)
                 goto err_unreg_subsys;
         }

-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-        ret = nf_conntrack_register_notifier(&ctnl_notifier);
-        if (ret < 0) {
-                pr_err("ctnetlink_init: cannot register notifier.\n");
+        if (register_pernet_subsys(&ctnetlink_net_ops)) {
+                pr_err("ctnetlink_init: cannot register pernet operations\n");
                 goto err_unreg_exp_subsys;
         }

-        ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
-        if (ret < 0) {
-                pr_err("ctnetlink_init: cannot expect register notifier.\n");
-                goto err_unreg_notifier;
-        }
-#endif
-
         return 0;

-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
-        nf_conntrack_unregister_notifier(&ctnl_notifier);
 err_unreg_exp_subsys:
         nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
 err_unreg_subsys:
         nfnetlink_subsys_unregister(&ctnl_subsys);
 err_out:
@@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void)
         pr_info("ctnetlink: unregistering from nfnetlink.\n");

         nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-        nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
-        nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+        unregister_pernet_subsys(&ctnetlink_net_ops);
         nfnetlink_subsys_unregister(&ctnl_exp_subsys);
         nfnetlink_subsys_unregister(&ctnl_subsys);
 }

net/netlabel/netlabel_kapi.c
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
         struct netlbl_domaddr_map *addrmap = NULL;
         struct netlbl_domaddr4_map *map4 = NULL;
         struct netlbl_domaddr6_map *map6 = NULL;
-        const struct in_addr *addr4, *mask4;
-        const struct in6_addr *addr6, *mask6;

         entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
         if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                 INIT_LIST_HEAD(&addrmap->list6);

                 switch (family) {
-                case AF_INET:
-                        addr4 = addr;
-                        mask4 = mask;
+                case AF_INET: {
+                        const struct in_addr *addr4 = addr;
+                        const struct in_addr *mask4 = mask;
                         map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
                         if (map4 == NULL)
                                 goto cfg_unlbl_map_add_failure;
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                         if (ret_val != 0)
                                 goto cfg_unlbl_map_add_failure;
                         break;
-                case AF_INET6:
-                        addr6 = addr;
-                        mask6 = mask;
+                }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+                case AF_INET6: {
+                        const struct in6_addr *addr6 = addr;
+                        const struct in6_addr *mask6 = mask;
                         map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
                         if (map6 == NULL)
                                 goto cfg_unlbl_map_add_failure;
@@ -167,6 +167,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                         if (ret_val != 0)
                                 goto cfg_unlbl_map_add_failure;
                         break;
+                }
+#endif /* IPv6 */
                 default:
                         goto cfg_unlbl_map_add_failure;
                         break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
         case AF_INET:
                 addr_len = sizeof(struct in_addr);
                 break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
         case AF_INET6:
                 addr_len = sizeof(struct in6_addr);
                 break;
+#endif /* IPv6 */
         default:
                 return -EPFNOSUPPORT;
         }
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
         case AF_INET:
                 addr_len = sizeof(struct in_addr);
                 break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
         case AF_INET6:
                 addr_len = sizeof(struct in6_addr);
                 break;
+#endif /* IPv6 */
         default:
                 return -EPFNOSUPPORT;
         }

net/sched/sch_red.c
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
                       ctl->Plog, ctl->Scell_log,
                       nla_data(tb[TCA_RED_STAB]));

-        if (skb_queue_empty(&sch->q))
-                red_end_of_idle_period(&q->parms);
+        if (!q->qdisc->q.qlen)
+                red_start_of_idle_period(&q->parms);

         sch_tree_unlock(sch);
         return 0;

net/sched/sch_teql.c
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)


 static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+               struct net_device *dev, struct netdev_queue *txq,
+               struct neighbour *mn)
 {
-        struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
-        struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-        struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+        struct teql_sched_data *q = qdisc_priv(txq->qdisc);
         struct neighbour *n = q->ncache;

         if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 }

 static inline int teql_resolve(struct sk_buff *skb,
-                               struct sk_buff *skb_res, struct net_device *dev)
+                               struct sk_buff *skb_res,
+                               struct net_device *dev,
+                               struct netdev_queue *txq)
 {
-        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+        struct dst_entry *dst = skb_dst(skb);
+        struct neighbour *mn;
+        int res;
+
         if (txq->qdisc == &noop_qdisc)
                 return -ENODEV;

-        if (dev->header_ops == NULL ||
-            skb_dst(skb) == NULL ||
-            dst_get_neighbour(skb_dst(skb)) == NULL)
+        if (!dev->header_ops || !dst)
                 return 0;
-        return __teql_resolve(skb, skb_res, dev);
+
+        rcu_read_lock();
+        mn = dst_get_neighbour(dst);
+        res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+        rcu_read_unlock();
+
+        return res;
 }

 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
                         continue;
                 }

-                switch (teql_resolve(skb, skb_res, slave)) {
+                switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
                 case 0:
                         if (__netif_tx_trylock(slave_txq)) {
                                 unsigned int length = qdisc_pkt_len(skb);
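
Reviewer note: dst_get_neighbour() returns an RCU-protected pointer in this series, so teql_resolve() now takes the neighbour inside rcu_read_lock() and tolerates NULL instead of dereferencing a pointer that may be freed under it. The access rule in miniature (hypothetical helper):

/* Sketch: neighbour pointers from a dst are only valid inside an
 * RCU read-side section, and may be NULL. */
static bool example_neigh_connected(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct neighbour *n;
        bool connected = false;

        if (dst == NULL)
                return false;

        rcu_read_lock();
        n = dst_get_neighbour(dst);
        if (n != NULL)
                connected = (n->nud_state & NUD_CONNECTED) != 0;
        rcu_read_unlock();              /* n must not be used past here */

        return connected;
}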

net/sctp/auth.c
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
         struct sctp_auth_bytes *key;

         /* Verify that we are not going to overflow INT_MAX */
-        if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+        if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
                 return NULL;

         /* Allocate the shared key */
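
Reviewer note: both forms of the sctp check target the same overflow, but only the new one is correct for very large key_len: in the old test, INT_MAX - key_len is computed in unsigned arithmetic and wraps to a huge value, so the check passes exactly when it must not. A standalone demonstration:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned int key_len = 0xffffffffu;     /* hostile length */
        size_t hdr = 16;   /* stands in for sizeof(struct sctp_auth_bytes) */

        /* old: (INT_MAX - key_len) wraps to 0x80000000, not < hdr: accepted */
        int old_accepts = !((INT_MAX - key_len) < hdr);
        /* new: compares against a constant that cannot wrap: rejected */
        int new_accepts = !(key_len > (INT_MAX - hdr));

        printf("old accepts: %d, new accepts: %d\n", old_accepts, new_accepts);
        return 0;
}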

net/sunrpc/xprtsock.c
@@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
         struct rpc_rqst *req = task->tk_rqstp;
         struct rpc_xprt *xprt = req->rq_xprt;
         struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-        int ret = 0;
+        int ret = -EAGAIN;

         dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
                         task->tk_pid, req->rq_slen - req->rq_bytes_sent,
@@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
         /* Don't race with disconnect */
         if (xprt_connected(xprt)) {
                 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
-                        ret = -EAGAIN;
                         /*
                          * Notify TCP that we're limited by the application
                          * window size
@@ -2530,8 +2529,10 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
                 int err;
                 err = xs_init_anyaddr(args->dstaddr->sa_family,
                                       (struct sockaddr *)&new->srcaddr);
-                if (err != 0)
+                if (err != 0) {
+                        xprt_free(xprt);
                         return ERR_PTR(err);
+                }
         }

         return xprt;

net/unix/af_unix.c
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                         if ((UNIXCB(skb).pid != siocb->scm->pid) ||
                             (UNIXCB(skb).cred != siocb->scm->cred)) {
                                 skb_queue_head(&sk->sk_receive_queue, skb);
+                                sk->sk_data_ready(sk, skb->len);
                                 break;
                         }
                 } else {
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                 chunk = min_t(unsigned int, skb->len, size);
                 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                         skb_queue_head(&sk->sk_receive_queue, skb);
+                        sk->sk_data_ready(sk, skb->len);
                         if (copied == 0)
                                 copied = -EFAULT;
                         break;
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                 /* put the skb back if we didn't use it up.. */
                 if (skb->len) {
                         skb_queue_head(&sk->sk_receive_queue, skb);
+                        sk->sk_data_ready(sk, skb->len);
                         break;
                 }

@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,

                         /* put message back and return */
                         skb_queue_head(&sk->sk_receive_queue, skb);
+                        sk->sk_data_ready(sk, skb->len);
                         break;
                 }
         } while (size);
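
Reviewer note: every early-exit path in unix_stream_recvmsg() that requeues the skb now re-raises the data-ready event. The original wakeup was consumed by this receive attempt, so without the call another reader sleeping in poll() could miss data that was put back. The rule as a helper (sketch; pre-3.15 sk_data_ready signature, as used throughout this diff):

/* After putting an skb back on the receive queue, wake readers again. */
static void example_requeue(struct sock *sk, struct sk_buff *skb)
{
        skb_queue_head(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, skb->len);
}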

net/wireless/nl80211.c
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
         [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
         [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },

-        [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
-        [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+        [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+        [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },

         [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
         [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
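
Reviewer note: dropping .type = NLA_BINARY from the MAC attributes is a semantic fix, not a cleanup: NLA_BINARY treats .len as a maximum only, while an untyped policy entry with .len set enforces a minimum payload, so undersized addresses are now rejected instead of being read past their end. The two behaviours side by side (illustrative policy, not from nl80211):

static const struct nla_policy example_policy[] = {
        [1] = { .type = NLA_BINARY, .len = ETH_ALEN },  /* accepts 0..6 bytes */
        [2] = { .len = ETH_ALEN },                      /* rejects  < 6 bytes */
};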