Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (69 commits)
  pptp: Accept packet with seq zero
  RDS: Remove some unused iWARP code
  net: fsl: fec: handle 10Mbps speed in RMII mode
  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c: add missing iounmap
  drivers/net/ethernet/tundra/tsi108_eth.c: add missing iounmap
  ksz884x: fix mtu for VLAN
  net_sched: sfq: add optional RED on top of SFQ
  dp83640: Fix NOHZ local_softirq_pending 08 warning
  gianfar: Fix invalid TX frames returned on error queue when time stamping
  gianfar: Fix missing sock reference when processing TX time stamps
  phylib: introduce mdiobus_alloc_size()
  net: decrement memcg jump label when limit, not usage, is changed
  net: reintroduce missing rcu_assign_pointer() calls
  inet_diag: Rename inet_diag_req_compat into inet_diag_req
  inet_diag: Rename inet_diag_req into inet_diag_req_v2
  bond_alb: don't disable softirq under bond_alb_xmit
  mac80211: fix rx->key NULL pointer dereference in promiscuous mode
  nl80211: fix old station flags compatibility
  mdio-octeon: use an unique MDIO bus name.
  mdio-gpio: use an unique MDIO bus name.
  ...
@@ -156,17 +156,17 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
write_lock(&l->lock);
sk_add_node(sk, &l->head);
write_unlock_bh(&l->lock);
write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
write_lock(&l->lock);
sk_del_node_init(sk);
write_unlock_bh(&l->lock);
write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);
@@ -711,7 +711,14 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
if (rp->status)
return;
memcpy(hdev->extfeatures, rp->features, 8);
switch (rp->page) {
case 0:
memcpy(hdev->features, rp->features, 8);
break;
case 1:
memcpy(hdev->host_features, rp->features, 8);
break;
}
hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
@@ -1047,9 +1054,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
case LE_SCANNING_DISABLED:
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
cancel_delayed_work_sync(&hdev->adv_work);
queue_delayed_work(hdev->workqueue, &hdev->adv_work,
jiffies + ADV_CLEAR_TIMEOUT);
schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
break;
default:
@@ -2266,20 +2271,19 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
int i;
skb_pull(skb, sizeof(*ev));
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
return;
}
if (skb->len < ev->num_hndl * 4) {
if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
for (i = 0; i < ev->num_hndl; i++) {
struct hci_comp_pkts_info *info = &ev->handles[i];
struct hci_conn *conn;
@@ -767,7 +767,6 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
sk_for_each(sk, node, &hci_sk_list.head) {
local_bh_disable();
bh_lock_sock_nested(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
@@ -778,7 +777,6 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
hci_dev_put(hdev);
}
bh_unlock_sock(sk);
local_bh_enable();
}
read_unlock(&hci_sk_list.lock);
}
@@ -165,7 +165,7 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
int err;
write_lock_bh(&chan_list_lock);
write_lock(&chan_list_lock);
if (psm && __l2cap_global_chan_by_addr(psm, src)) {
err = -EADDRINUSE;
@@ -190,17 +190,17 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
}
done:
write_unlock_bh(&chan_list_lock);
write_unlock(&chan_list_lock);
return err;
}
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
write_lock_bh(&chan_list_lock);
write_lock(&chan_list_lock);
chan->scid = scid;
write_unlock_bh(&chan_list_lock);
write_unlock(&chan_list_lock);
return 0;
}
@@ -289,9 +289,9 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
chan->sk = sk;
write_lock_bh(&chan_list_lock);
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock_bh(&chan_list_lock);
write_unlock(&chan_list_lock);
INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
@@ -306,9 +306,9 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
write_lock_bh(&chan_list_lock);
write_lock(&chan_list_lock);
list_del(&chan->global_l);
write_unlock_bh(&chan_list_lock);
write_unlock(&chan_list_lock);
l2cap_chan_put(chan);
}
@@ -543,14 +543,14 @@ static u8 l2cap_get_ident(struct l2cap_conn *conn)
* 200 - 254 are used by utilities like l2ping, etc.
*/
spin_lock_bh(&conn->lock);
spin_lock(&conn->lock);
if (++conn->tx_ident > 128)
conn->tx_ident = 1;
id = conn->tx_ident;
spin_unlock_bh(&conn->lock);
spin_unlock(&conn->lock);
return id;
}
@@ -1190,7 +1190,7 @@ inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdad
}
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, src);
bacpy(&bt_sk(sk)->dst, dst);
chan->psm = psm;
chan->dcid = cid;
@@ -4702,7 +4702,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
struct l2cap_chan *c;
read_lock_bh(&chan_list_lock);
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
struct sock *sk = c->sk;
@@ -4715,7 +4715,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
c->sec_level, c->mode);
}
read_unlock_bh(&chan_list_lock);
read_unlock(&chan_list_lock);
return 0;
}
@@ -587,6 +587,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
if (smp_conn_security(conn, sec.level))
break;
sk->sk_state = BT_CONFIG;
chan->state = BT_CONFIG;
/* or for ACL link, under defer_setup time */
} else if (sk->sk_state == BT_CONNECT2 &&
@@ -731,6 +732,7 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
sk->sk_state = BT_CONFIG;
pi->chan->state = BT_CONFIG;
__l2cap_connect_rsp_defer(pi->chan);
release_sock(sk);
@@ -291,7 +291,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (!(hdev->features[4] & LMP_NO_BREDR))
settings |= MGMT_SETTING_BREDR;
if (hdev->extfeatures[0] & LMP_HOST_LE)
if (hdev->host_features[0] & LMP_HOST_LE)
settings |= MGMT_SETTING_LE;
if (test_bit(HCI_AUTH, &hdev->flags))
@@ -2756,7 +2756,7 @@ int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
if (!cmd)
return -ENOENT;
err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
mgmt_pending_remove(cmd);
return err;
@@ -370,7 +370,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
goto done;
}
write_lock_bh(&rfcomm_sk_list.lock);
write_lock(&rfcomm_sk_list.lock);
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
@@ -381,7 +381,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
sk->sk_state = BT_BOUND;
}
write_unlock_bh(&rfcomm_sk_list.lock);
write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
@@ -455,7 +455,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
err = -EINVAL;
write_lock_bh(&rfcomm_sk_list.lock);
write_lock(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
if (!__rfcomm_get_sock_by_addr(channel, src)) {
@@ -464,7 +464,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
break;
}
write_unlock_bh(&rfcomm_sk_list.lock);
write_unlock(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
@@ -982,7 +982,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
struct sock *sk;
struct hlist_node *node;
read_lock_bh(&rfcomm_sk_list.lock);
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, node, &rfcomm_sk_list.head) {
seq_printf(f, "%s %s %d %d\n",
@@ -991,7 +991,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
sk->sk_state, rfcomm_pi(sk)->channel);
}
read_unlock_bh(&rfcomm_sk_list.lock);
read_unlock(&rfcomm_sk_list.lock);
return 0;
}
@@ -76,7 +76,7 @@ struct rfcomm_dev {
};
static LIST_HEAD(rfcomm_dev_list);
static DEFINE_RWLOCK(rfcomm_dev_lock);
static DEFINE_SPINLOCK(rfcomm_dev_lock);
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
@@ -146,7 +146,7 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
read_lock(&rfcomm_dev_lock);
spin_lock(&rfcomm_dev_lock);
dev = __rfcomm_dev_get(id);
@@ -157,7 +157,7 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
rfcomm_dev_hold(dev);
}
read_unlock(&rfcomm_dev_lock);
spin_unlock(&rfcomm_dev_lock);
return dev;
}
@@ -205,7 +205,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
if (!dev)
return -ENOMEM;
write_lock_bh(&rfcomm_dev_lock);
spin_lock(&rfcomm_dev_lock);
if (req->dev_id < 0) {
dev->id = 0;
@@ -290,7 +290,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
__module_get(THIS_MODULE);
out:
write_unlock_bh(&rfcomm_dev_lock);
spin_unlock(&rfcomm_dev_lock);
if (err < 0)
goto free;
@@ -327,9 +327,9 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
if (atomic_read(&dev->opened) > 0)
return;
write_lock_bh(&rfcomm_dev_lock);
spin_lock(&rfcomm_dev_lock);
list_del_init(&dev->list);
write_unlock_bh(&rfcomm_dev_lock);
spin_unlock(&rfcomm_dev_lock);
rfcomm_dev_put(dev);
}
@@ -473,7 +473,7 @@ static int rfcomm_get_dev_list(void __user *arg)
di = dl->dev_info;
read_lock_bh(&rfcomm_dev_lock);
spin_lock(&rfcomm_dev_lock);
list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
@@ -488,7 +488,7 @@ static int rfcomm_get_dev_list(void __user *arg)
break;
}
read_unlock_bh(&rfcomm_dev_lock);
spin_unlock(&rfcomm_dev_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*di);
@@ -766,9 +766,9 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
rfcomm_dlc_unlock(dev->dlc);
if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
write_lock_bh(&rfcomm_dev_lock);
spin_lock(&rfcomm_dev_lock);
list_del_init(&dev->list);
write_unlock_bh(&rfcomm_dev_lock);
spin_unlock(&rfcomm_dev_lock);
rfcomm_dev_put(dev);
}
@@ -482,7 +482,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
write_lock_bh(&sco_sk_list.lock);
write_lock(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
@@ -492,7 +492,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
sk->sk_state = BT_BOUND;
}
write_unlock_bh(&sco_sk_list.lock);
write_unlock(&sco_sk_list.lock);
done:
release_sock(sk);
@@ -965,14 +965,14 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
struct sock *sk;
struct hlist_node *node;
read_lock_bh(&sco_sk_list.lock);
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock_bh(&sco_sk_list.lock);
read_unlock(&sco_sk_list.lock);
return 0;
}
@@ -1177,9 +1177,9 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
nonempty = 1;
}
if (nonempty)
RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
else {
if (nonempty) {
rcu_assign_pointer(dev->xps_maps, new_dev_maps);
} else {
kfree(new_dev_maps);
RCU_INIT_POINTER(dev->xps_maps, NULL);
}
@@ -765,7 +765,7 @@ int __netpoll_setup(struct netpoll *np)
}
/* last thing to do is link it to the net device structure */
RCU_INIT_POINTER(ndev->npinfo, npinfo);
rcu_assign_pointer(ndev->npinfo, npinfo);
return 0;
@@ -49,13 +49,13 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct inet_diag_req *r, struct nlattr *bc)
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
}
static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
struct inet_diag_req *req)
struct inet_diag_req_v2 *req)
{
return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
}
@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
|
||||
}
|
||||
|
||||
ifa->ifa_next = dn_db->ifa_list;
|
||||
RCU_INIT_POINTER(dn_db->ifa_list, ifa);
|
||||
rcu_assign_pointer(dn_db->ifa_list, ifa);
|
||||
|
||||
dn_ifaddr_notify(RTM_NEWADDR, ifa);
|
||||
blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
|
||||
@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
|
||||
|
||||
memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
|
||||
|
||||
RCU_INIT_POINTER(dev->dn_ptr, dn_db);
|
||||
rcu_assign_pointer(dev->dn_ptr, dn_db);
|
||||
dn_db->dev = dev;
|
||||
init_timer(&dn_db->timer);
|
||||
|
||||
|
@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
|
||||
ip_mc_up(in_dev);
|
||||
|
||||
/* we can receive as soon as ip_ptr is set -- do this last */
|
||||
RCU_INIT_POINTER(dev->ip_ptr, in_dev);
|
||||
rcu_assign_pointer(dev->ip_ptr, in_dev);
|
||||
out:
|
||||
return in_dev;
|
||||
out_kfree:
|
||||
|
@@ -205,7 +205,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
|
||||
return (struct tnode *)(parent & ~NODE_TYPE_MASK);
|
||||
}
|
||||
|
||||
/* Same as RCU_INIT_POINTER
|
||||
/* Same as rcu_assign_pointer
|
||||
* but that macro() assumes that value is a pointer.
|
||||
*/
|
||||
static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
|
||||
@@ -529,7 +529,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
|
||||
if (n)
|
||||
node_set_parent(n, tn);
|
||||
|
||||
RCU_INIT_POINTER(tn->child[i], n);
|
||||
rcu_assign_pointer(tn->child[i], n);
|
||||
}
|
||||
|
||||
#define MAX_WORK 10
|
||||
@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
|
||||
|
||||
tp = node_parent((struct rt_trie_node *) tn);
|
||||
if (!tp)
|
||||
RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
|
||||
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
|
||||
|
||||
tnode_free_flush();
|
||||
if (!tp)
|
||||
@@ -1027,7 +1027,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
|
||||
if (IS_TNODE(tn))
|
||||
tn = (struct tnode *)resize(t, (struct tnode *)tn);
|
||||
|
||||
RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
|
||||
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
|
||||
tnode_free_flush();
|
||||
}
|
||||
|
||||
@@ -1164,7 +1164,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
|
||||
put_child(t, (struct tnode *)tp, cindex,
|
||||
(struct rt_trie_node *)tn);
|
||||
} else {
|
||||
RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
|
||||
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
|
||||
tp = tn;
|
||||
}
|
||||
}
|
||||
|
@@ -1249,7 +1249,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
|
||||
|
||||
im->next_rcu = in_dev->mc_list;
|
||||
in_dev->mc_count++;
|
||||
RCU_INIT_POINTER(in_dev->mc_list, im);
|
||||
rcu_assign_pointer(in_dev->mc_list, im);
|
||||
|
||||
#ifdef CONFIG_IP_MULTICAST
|
||||
igmpv3_del_delrec(in_dev, im->multiaddr);
|
||||
@@ -1821,7 +1821,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
|
||||
iml->next_rcu = inet->mc_list;
|
||||
iml->sflist = NULL;
|
||||
iml->sfmode = MCAST_EXCLUDE;
|
||||
RCU_INIT_POINTER(inet->mc_list, iml);
|
||||
rcu_assign_pointer(inet->mc_list, iml);
|
||||
ip_mc_inc_group(in_dev, addr);
|
||||
err = 0;
|
||||
done:
|
||||
@@ -2008,7 +2008,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
|
||||
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
|
||||
kfree_rcu(psl, rcu);
|
||||
}
|
||||
RCU_INIT_POINTER(pmc->sflist, newpsl);
|
||||
rcu_assign_pointer(pmc->sflist, newpsl);
|
||||
psl = newpsl;
|
||||
}
|
||||
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
|
||||
@@ -2111,7 +2111,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
|
||||
} else
|
||||
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
|
||||
0, NULL, 0);
|
||||
RCU_INIT_POINTER(pmc->sflist, newpsl);
|
||||
rcu_assign_pointer(pmc->sflist, newpsl);
|
||||
pmc->sfmode = msf->imsf_fmode;
|
||||
err = 0;
|
||||
done:
|
||||
|
@@ -71,7 +71,7 @@ static inline void inet_diag_unlock_handler(
|
||||
}
|
||||
|
||||
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
|
||||
struct sk_buff *skb, struct inet_diag_req *req,
|
||||
struct sk_buff *skb, struct inet_diag_req_v2 *req,
|
||||
u32 pid, u32 seq, u16 nlmsg_flags,
|
||||
const struct nlmsghdr *unlh)
|
||||
{
|
||||
@@ -193,7 +193,7 @@ nlmsg_failure:
|
||||
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
|
||||
|
||||
static int inet_csk_diag_fill(struct sock *sk,
|
||||
struct sk_buff *skb, struct inet_diag_req *req,
|
||||
struct sk_buff *skb, struct inet_diag_req_v2 *req,
|
||||
u32 pid, u32 seq, u16 nlmsg_flags,
|
||||
const struct nlmsghdr *unlh)
|
||||
{
|
||||
@@ -202,7 +202,7 @@ static int inet_csk_diag_fill(struct sock *sk,
|
||||
}
|
||||
|
||||
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
|
||||
struct sk_buff *skb, struct inet_diag_req *req,
|
||||
struct sk_buff *skb, struct inet_diag_req_v2 *req,
|
||||
u32 pid, u32 seq, u16 nlmsg_flags,
|
||||
const struct nlmsghdr *unlh)
|
||||
{
|
||||
@@ -253,7 +253,7 @@ nlmsg_failure:
|
||||
}
|
||||
|
||||
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
|
||||
struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
|
||||
struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
|
||||
const struct nlmsghdr *unlh)
|
||||
{
|
||||
if (sk->sk_state == TCP_TIME_WAIT)
|
||||
@@ -264,7 +264,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh, struct inet_diag_req *req)
|
||||
const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
|
||||
{
|
||||
int err;
|
||||
struct sock *sk;
|
||||
@@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
|
||||
|
||||
static int inet_diag_get_exact(struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh,
|
||||
struct inet_diag_req *req)
|
||||
struct inet_diag_req_v2 *req)
|
||||
{
|
||||
const struct inet_diag_handler *handler;
|
||||
int err;
|
||||
@@ -540,7 +540,7 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
|
||||
static int inet_csk_diag_dump(struct sock *sk,
|
||||
struct sk_buff *skb,
|
||||
struct netlink_callback *cb,
|
||||
struct inet_diag_req *r,
|
||||
struct inet_diag_req_v2 *r,
|
||||
const struct nlattr *bc)
|
||||
{
|
||||
if (!inet_diag_bc_sk(bc, sk))
|
||||
@@ -554,7 +554,7 @@ static int inet_csk_diag_dump(struct sock *sk,
|
||||
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
|
||||
struct sk_buff *skb,
|
||||
struct netlink_callback *cb,
|
||||
struct inet_diag_req *r,
|
||||
struct inet_diag_req_v2 *r,
|
||||
const struct nlattr *bc)
|
||||
{
|
||||
if (bc != NULL) {
|
||||
@@ -639,7 +639,7 @@ nlmsg_failure:
|
||||
|
||||
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
|
||||
struct netlink_callback *cb,
|
||||
struct inet_diag_req *r,
|
||||
struct inet_diag_req_v2 *r,
|
||||
const struct nlattr *bc)
|
||||
{
|
||||
struct inet_diag_entry entry;
|
||||
@@ -721,7 +721,7 @@ out:
|
||||
}
|
||||
|
||||
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
|
||||
struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
|
||||
struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
|
||||
{
|
||||
int i, num;
|
||||
int s_i, s_num;
|
||||
@@ -872,7 +872,7 @@ out:
|
||||
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
|
||||
|
||||
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
struct inet_diag_req *r, struct nlattr *bc)
|
||||
struct inet_diag_req_v2 *r, struct nlattr *bc)
|
||||
{
|
||||
const struct inet_diag_handler *handler;
|
||||
|
||||
@@ -887,12 +887,12 @@ static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
{
|
||||
struct nlattr *bc = NULL;
|
||||
int hdrlen = sizeof(struct inet_diag_req);
|
||||
int hdrlen = sizeof(struct inet_diag_req_v2);
|
||||
|
||||
if (nlmsg_attrlen(cb->nlh, hdrlen))
|
||||
bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
|
||||
|
||||
return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
|
||||
return __inet_diag_dump(skb, cb, (struct inet_diag_req_v2 *)NLMSG_DATA(cb->nlh), bc);
|
||||
}
|
||||
|
||||
static inline int inet_diag_type2proto(int type)
|
||||
@@ -909,10 +909,10 @@ static inline int inet_diag_type2proto(int type)
|
||||
|
||||
static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
{
|
||||
struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
|
||||
struct inet_diag_req req;
|
||||
struct inet_diag_req *rc = NLMSG_DATA(cb->nlh);
|
||||
struct inet_diag_req_v2 req;
|
||||
struct nlattr *bc = NULL;
|
||||
int hdrlen = sizeof(struct inet_diag_req_compat);
|
||||
int hdrlen = sizeof(struct inet_diag_req);
|
||||
|
||||
req.sdiag_family = AF_UNSPEC; /* compatibility */
|
||||
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
|
||||
@@ -929,8 +929,8 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
|
||||
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh)
|
||||
{
|
||||
struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
|
||||
struct inet_diag_req req;
|
||||
struct inet_diag_req *rc = NLMSG_DATA(nlh);
|
||||
struct inet_diag_req_v2 req;
|
||||
|
||||
req.sdiag_family = rc->idiag_family;
|
||||
req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
|
||||
@@ -943,7 +943,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
|
||||
|
||||
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
{
|
||||
int hdrlen = sizeof(struct inet_diag_req_compat);
|
||||
int hdrlen = sizeof(struct inet_diag_req);
|
||||
|
||||
if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
|
||||
nlmsg_len(nlh) < hdrlen)
|
||||
@@ -970,7 +970,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
|
||||
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
|
||||
{
|
||||
int hdrlen = sizeof(struct inet_diag_req);
|
||||
int hdrlen = sizeof(struct inet_diag_req_v2);
|
||||
|
||||
if (nlmsg_len(h) < hdrlen)
|
||||
return -EINVAL;
|
||||
@@ -990,7 +990,7 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
|
||||
inet_diag_dump, NULL, 0);
|
||||
}
|
||||
|
||||
return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
|
||||
return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
|
||||
}
|
||||
|
||||
static struct sock_diag_handler inet_diag_handler = {
|
||||
|
@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
|
||||
(iter = rtnl_dereference(*tp)) != NULL;
|
||||
tp = &iter->next) {
|
||||
if (t == iter) {
|
||||
RCU_INIT_POINTER(*tp, t->next);
|
||||
rcu_assign_pointer(*tp, t->next);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
|
||||
{
|
||||
struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
|
||||
|
||||
RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
|
||||
RCU_INIT_POINTER(*tp, t);
|
||||
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
|
||||
rcu_assign_pointer(*tp, t);
|
||||
}
|
||||
|
||||
static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
|
||||
@@ -792,7 +792,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_hold(dev);
|
||||
RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
|
||||
rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -1225,7 +1225,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
|
||||
|
||||
ret = ip_ra_control(sk, 1, mrtsock_destruct);
|
||||
if (ret == 0) {
|
||||
RCU_INIT_POINTER(mrt->mroute_sk, sk);
|
||||
rcu_assign_pointer(mrt->mroute_sk, sk);
|
||||
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
|
||||
}
|
||||
rtnl_unlock();
|
||||
|
@@ -35,13 +35,13 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
|
||||
}
|
||||
|
||||
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
struct inet_diag_req *r, struct nlattr *bc)
|
||||
struct inet_diag_req_v2 *r, struct nlattr *bc)
|
||||
{
|
||||
inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
|
||||
}
|
||||
|
||||
static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
|
||||
struct inet_diag_req *req)
|
||||
struct inet_diag_req_v2 *req)
|
||||
{
|
||||
return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
|
||||
}
|
||||
|
@@ -108,7 +108,7 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
|
||||
tcp = tcp_from_cgproto(cg_proto);
|
||||
percpu_counter_destroy(&tcp->tcp_sockets_allocated);
|
||||
|
||||
val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
|
||||
val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
|
||||
|
||||
if (val != RESOURCE_MAX)
|
||||
jump_label_dec(&memcg_socket_limit_enabled);
|
||||
|
@@ -19,7 +19,7 @@
|
||||
#include <linux/sock_diag.h>
|
||||
|
||||
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
|
||||
struct netlink_callback *cb, struct inet_diag_req *req,
|
||||
struct netlink_callback *cb, struct inet_diag_req_v2 *req,
|
||||
struct nlattr *bc)
|
||||
{
|
||||
if (!inet_diag_bc_sk(bc, sk))
|
||||
@@ -30,7 +30,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
|
||||
}
|
||||
|
||||
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh, struct inet_diag_req *req)
|
||||
const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
|
||||
{
|
||||
int err = -EINVAL;
|
||||
struct sock *sk;
|
||||
@@ -88,7 +88,7 @@ out_nosk:
|
||||
}
|
||||
|
||||
static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
|
||||
struct inet_diag_req *r, struct nlattr *bc)
|
||||
struct inet_diag_req_v2 *r, struct nlattr *bc)
|
||||
{
|
||||
int num, s_num, slot, s_slot;
|
||||
|
||||
@@ -136,13 +136,13 @@ done:
|
||||
}
|
||||
|
||||
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
struct inet_diag_req *r, struct nlattr *bc)
|
||||
struct inet_diag_req_v2 *r, struct nlattr *bc)
|
||||
{
|
||||
udp_dump(&udp_table, skb, cb, r, bc);
|
||||
}
|
||||
|
||||
static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
|
||||
struct inet_diag_req *req)
|
||||
struct inet_diag_req_v2 *req)
|
||||
{
|
||||
return udp_dump_one(&udp_table, in_skb, nlh, req);
|
||||
}
|
||||
@@ -154,13 +154,13 @@ static const struct inet_diag_handler udp_diag_handler = {
|
||||
};
|
||||
|
||||
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
|
||||
struct inet_diag_req *r, struct nlattr *bc)
|
||||
struct inet_diag_req_v2 *r, struct nlattr *bc)
|
||||
{
|
||||
udp_dump(&udplite_table, skb, cb, r, bc);
|
||||
}
|
||||
|
||||
static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
|
||||
struct inet_diag_req *req)
|
||||
struct inet_diag_req_v2 *req)
|
||||
{
|
||||
return udp_dump_one(&udplite_table, in_skb, nlh, req);
|
||||
}
|
||||
|
@@ -429,7 +429,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
|
||||
ndev->tstamp = jiffies;
|
||||
addrconf_sysctl_register(ndev);
|
||||
/* protected by rtnl_lock */
|
||||
RCU_INIT_POINTER(dev->ip6_ptr, ndev);
|
||||
rcu_assign_pointer(dev->ip6_ptr, ndev);
|
||||
|
||||
/* Join all-node multicast group */
|
||||
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
|
||||
|
@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
|
||||
{
|
||||
struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
|
||||
|
||||
RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
|
||||
RCU_INIT_POINTER(*tp, t);
|
||||
rcu_assign_pointer(t->next , rtnl_dereference(*tp));
|
||||
rcu_assign_pointer(*tp, t);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
|
||||
(iter = rtnl_dereference(*tp)) != NULL;
|
||||
tp = &iter->next) {
|
||||
if (t == iter) {
|
||||
RCU_INIT_POINTER(*tp, t->next);
|
||||
rcu_assign_pointer(*tp, t->next);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -1450,7 +1450,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
|
||||
|
||||
t->parms.proto = IPPROTO_IPV6;
|
||||
dev_hold(dev);
|
||||
RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
|
||||
rcu_assign_pointer(ip6n->tnls_wc[0], t);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -131,7 +131,7 @@ static mh_filter_t __rcu *mh_filter __read_mostly;
|
||||
|
||||
int rawv6_mh_filter_register(mh_filter_t filter)
|
||||
{
|
||||
RCU_INIT_POINTER(mh_filter, filter);
|
||||
rcu_assign_pointer(mh_filter, filter);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(rawv6_mh_filter_register);
|
||||
|
@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
|
||||
(iter = rtnl_dereference(*tp)) != NULL;
|
||||
tp = &iter->next) {
|
||||
if (t == iter) {
|
||||
RCU_INIT_POINTER(*tp, t->next);
|
||||
rcu_assign_pointer(*tp, t->next);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
|
||||
{
|
||||
struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
|
||||
|
||||
RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
|
||||
RCU_INIT_POINTER(*tp, t);
|
||||
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
|
||||
rcu_assign_pointer(*tp, t);
|
||||
}
|
||||
|
||||
static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
|
||||
@@ -393,7 +393,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
|
||||
p->addr = a->addr;
|
||||
p->flags = a->flags;
|
||||
t->prl_count++;
|
||||
RCU_INIT_POINTER(t->prl, p);
|
||||
rcu_assign_pointer(t->prl, p);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
@@ -1177,7 +1177,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
|
||||
if (!dev->tstats)
|
||||
return -ENOMEM;
|
||||
dev_hold(dev);
|
||||
RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
|
||||
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -332,7 +332,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
|
||||
status = WLAN_STATUS_SUCCESS;
|
||||
|
||||
/* activate it for RX */
|
||||
RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
|
||||
rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
|
||||
|
||||
if (timeout)
|
||||
mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
|
||||
|
@@ -616,7 +616,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
|
||||
|
||||
sdata->vif.bss_conf.dtim_period = new->dtim_period;
|
||||
|
||||
RCU_INIT_POINTER(sdata->u.ap.beacon, new);
|
||||
rcu_assign_pointer(sdata->u.ap.beacon, new);
|
||||
|
||||
synchronize_rcu();
|
||||
|
||||
@@ -1033,7 +1033,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
|
||||
rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
|
||||
}
|
||||
|
||||
sta->sdata = vlansdata;
|
||||
|
@@ -207,7 +207,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
|
||||
*pos++ = 0; /* U-APSD no in use */
|
||||
}
|
||||
|
||||
RCU_INIT_POINTER(ifibss->presp, skb);
|
||||
rcu_assign_pointer(ifibss->presp, skb);
|
||||
|
||||
sdata->vif.bss_conf.beacon_int = beacon_int;
|
||||
sdata->vif.bss_conf.basic_rates = basic_rates;
|
||||
|
@@ -73,7 +73,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
|
||||
if (!s)
|
||||
return -ENOENT;
|
||||
if (s == sta) {
|
||||
RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
|
||||
rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
|
||||
s->hnext);
|
||||
return 0;
|
||||
}
|
||||
@@ -83,7 +83,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
|
||||
s = rcu_dereference_protected(s->hnext,
|
||||
lockdep_is_held(&local->sta_mtx));
|
||||
if (rcu_access_pointer(s->hnext)) {
|
||||
RCU_INIT_POINTER(s->hnext, sta->hnext);
|
||||
rcu_assign_pointer(s->hnext, sta->hnext);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -226,7 +226,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
|
||||
{
|
||||
lockdep_assert_held(&local->sta_mtx);
|
||||
sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
|
||||
RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
|
||||
rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
|
||||
}
|
||||
|
||||
static void sta_unblock(struct work_struct *wk)
|
||||
|
@@ -106,7 +106,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
|
||||
if (status->flag & RX_FLAG_MMIC_ERROR)
|
||||
goto mic_fail;
|
||||
|
||||
if (!(status->flag & RX_FLAG_IV_STRIPPED))
|
||||
if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
|
||||
goto update_iv;
|
||||
|
||||
return RX_CONTINUE;
|
||||
|
@@ -777,7 +777,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
|
||||
if (exp->helper) {
|
||||
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
|
||||
if (help)
|
||||
RCU_INIT_POINTER(help->helper, exp->helper);
|
||||
rcu_assign_pointer(help->helper, exp->helper);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NF_CONNTRACK_MARK
|
||||
|
@@ -91,7 +91,7 @@ int nf_conntrack_register_notifier(struct net *net,
|
||||
ret = -EBUSY;
|
||||
goto out_unlock;
|
||||
}
|
||||
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
|
||||
rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
|
||||
mutex_unlock(&nf_ct_ecache_mutex);
|
||||
return ret;
|
||||
|
||||
@@ -128,7 +128,7 @@ int nf_ct_expect_register_notifier(struct net *net,
|
||||
ret = -EBUSY;
|
||||
goto out_unlock;
|
||||
}
|
||||
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
|
||||
rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
|
||||
mutex_unlock(&nf_ct_ecache_mutex);
|
||||
return ret;
|
||||
|
||||
|
@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
|
||||
before updating alloc_size */
|
||||
type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
|
||||
+ type->len;
|
||||
RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
|
||||
rcu_assign_pointer(nf_ct_ext_types[type->id], type);
|
||||
update_alloc_size(type);
|
||||
out:
|
||||
mutex_unlock(&nf_ct_ext_type_mutex);
|
||||
|
@@ -157,7 +157,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
|
||||
memset(&help->help, 0, sizeof(help->help));
|
||||
}
|
||||
|
||||
RCU_INIT_POINTER(help->helper, helper);
|
||||
rcu_assign_pointer(help->helper, helper);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
@@ -1172,7 +1172,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
RCU_INIT_POINTER(help->helper, helper);
|
||||
rcu_assign_pointer(help->helper, helper);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
|
||||
llog = rcu_dereference_protected(nf_loggers[pf],
|
||||
lockdep_is_held(&nf_log_mutex));
|
||||
if (llog == NULL)
|
||||
RCU_INIT_POINTER(nf_loggers[pf], logger);
|
||||
rcu_assign_pointer(nf_loggers[pf], logger);
|
||||
}
|
||||
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
return -ENOENT;
|
||||
}
|
||||
RCU_INIT_POINTER(nf_loggers[pf], logger);
|
||||
rcu_assign_pointer(nf_loggers[pf], logger);
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
return 0;
|
||||
}
|
||||
@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
return -ENOENT;
|
||||
}
|
||||
RCU_INIT_POINTER(nf_loggers[tindex], logger);
|
||||
rcu_assign_pointer(nf_loggers[tindex], logger);
|
||||
mutex_unlock(&nf_log_mutex);
|
||||
} else {
|
||||
mutex_lock(&nf_log_mutex);
|
||||
|
@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
|
||||
else if (old)
|
||||
ret = -EBUSY;
|
||||
else {
|
||||
RCU_INIT_POINTER(queue_handler[pf], qh);
|
||||
rcu_assign_pointer(queue_handler[pf], qh);
|
||||
ret = 0;
|
||||
}
|
||||
mutex_unlock(&queue_handler_mutex);
|
||||
|
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
|
||||
nfnl_unlock();
|
||||
return -EBUSY;
|
||||
}
|
||||
RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
|
||||
rcu_assign_pointer(subsys_table[n->subsys_id], n);
|
||||
nfnl_unlock();
|
||||
|
||||
return 0;
|
||||
@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
|
||||
if (!nfnl)
|
||||
return -ENOMEM;
|
||||
net->nfnl_stash = nfnl;
|
||||
RCU_INIT_POINTER(net->nfnl, nfnl);
|
||||
rcu_assign_pointer(net->nfnl, nfnl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
|
||||
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
|
||||
|
||||
spin_lock(&netlbl_domhsh_lock);
|
||||
RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
|
||||
rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
|
||||
spin_unlock(&netlbl_domhsh_lock);
|
||||
|
||||
return 0;
|
||||
@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
|
||||
&rcu_dereference(netlbl_domhsh)->tbl[bkt]);
|
||||
} else {
|
||||
INIT_LIST_HEAD(&entry->list);
|
||||
RCU_INIT_POINTER(netlbl_domhsh_def, entry);
|
||||
rcu_assign_pointer(netlbl_domhsh_def, entry);
|
||||
}
|
||||
|
||||
if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
|
||||
|
@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
|
||||
INIT_LIST_HEAD(&iface->list);
|
||||
if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
|
||||
goto add_iface_failure;
|
||||
RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
|
||||
rcu_assign_pointer(netlbl_unlhsh_def, iface);
|
||||
}
|
||||
spin_unlock(&netlbl_unlhsh_lock);
|
||||
|
||||
@@ -1447,11 +1447,9 @@ int __init netlbl_unlabel_init(u32 size)
|
||||
for (iter = 0; iter < hsh_tbl->size; iter++)
|
||||
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock(&netlbl_unlhsh_lock);
|
||||
RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
|
||||
rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
|
||||
spin_unlock(&netlbl_unlhsh_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
register_netdevice_notifier(&netlbl_unlhsh_netdev_notifier);
|
||||
|
||||
|
@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
|
||||
if (proto_tab[protocol])
|
||||
err = -EBUSY;
|
||||
else
|
||||
RCU_INIT_POINTER(proto_tab[protocol], pp);
|
||||
rcu_assign_pointer(proto_tab[protocol], pp);
|
||||
mutex_unlock(&proto_tab_lock);
|
||||
|
||||
return err;
|
||||
|
@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
|
||||
daddr = daddr >> 2;
|
||||
mutex_lock(&routes->lock);
|
||||
if (routes->table[daddr] == NULL) {
|
||||
RCU_INIT_POINTER(routes->table[daddr], dev);
|
||||
rcu_assign_pointer(routes->table[daddr], dev);
|
||||
dev_hold(dev);
|
||||
err = 0;
|
||||
}
|
||||
|
@@ -680,7 +680,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
|
||||
mutex_lock(&resource_mutex);
|
||||
if (pnres.sk[res] == NULL) {
|
||||
sock_hold(sk);
|
||||
RCU_INIT_POINTER(pnres.sk[res], sk);
|
||||
rcu_assign_pointer(pnres.sk[res], sk);
|
||||
ret = 0;
|
||||
}
|
||||
mutex_unlock(&resource_mutex);
|
||||
|
@@ -477,17 +477,6 @@ void rds_iw_sync_mr(void *trans_private, int direction)
|
||||
}
|
||||
}
|
||||
|
||||
static inline unsigned int rds_iw_flush_goal(struct rds_iw_mr_pool *pool, int free_all)
|
||||
{
|
||||
unsigned int item_count;
|
||||
|
||||
item_count = atomic_read(&pool->item_count);
|
||||
if (free_all)
|
||||
return item_count;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush our pool of MRs.
|
||||
* At a minimum, all currently unused MRs are unmapped.
|
||||
@@ -500,7 +489,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
|
||||
LIST_HEAD(unmap_list);
|
||||
LIST_HEAD(kill_list);
|
||||
unsigned long flags;
|
||||
unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
|
||||
unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;
|
||||
int ret = 0;
|
||||
|
||||
rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
|
||||
@@ -514,8 +503,6 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
|
||||
list_splice_init(&pool->clean_list, &kill_list);
|
||||
spin_unlock_irqrestore(&pool->list_lock, flags);
|
||||
|
||||
free_goal = rds_iw_flush_goal(pool, free_all);
|
||||
|
||||
/* Batched invalidate of dirty MRs.
|
||||
* For FMR based MRs, the mappings on the unmap list are
|
||||
* actually members of an ibmr (ibmr->mapping). They either
|
||||
|
@@ -24,6 +24,7 @@
|
||||
#include <net/netlink.h>
|
||||
#include <net/pkt_sched.h>
|
||||
#include <net/flow_keys.h>
|
||||
#include <net/red.h>
|
||||
|
||||
|
||||
/* Stochastic Fairness Queuing algorithm.
|
||||
@@ -108,24 +109,30 @@ struct sfq_slot {
|
||||
struct sfq_head dep; /* anchor in dep[] chains */
|
||||
unsigned short hash; /* hash value (index in ht[]) */
|
||||
short allot; /* credit for this slot */
|
||||
|
||||
unsigned int backlog;
|
||||
struct red_vars vars;
|
||||
};
|
||||
|
||||
struct sfq_sched_data {
|
||||
/* frequently used fields */
|
||||
int limit; /* limit of total number of packets in this qdisc */
|
||||
unsigned int divisor; /* number of slots in hash table */
|
||||
unsigned int maxflows; /* number of flows in flows array */
|
||||
int headdrop;
|
||||
int maxdepth; /* limit of packets per flow */
|
||||
u8 headdrop;
|
||||
u8 maxdepth; /* limit of packets per flow */
|
||||
|
||||
u32 perturbation;
|
||||
struct tcf_proto *filter_list;
|
||||
sfq_index cur_depth; /* depth of longest slot */
|
||||
u8 cur_depth; /* depth of longest slot */
|
||||
u8 flags;
|
||||
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
|
||||
struct sfq_slot *tail; /* current slot in round */
|
||||
struct tcf_proto *filter_list;
|
||||
sfq_index *ht; /* Hash table ('divisor' slots) */
|
||||
struct sfq_slot *slots; /* Flows table ('maxflows' entries) */
|
||||
|
||||
struct red_parms *red_parms;
|
||||
struct tc_sfqred_stats stats;
|
||||
struct sfq_slot *tail; /* current slot in round */
|
||||
|
||||
struct sfq_head dep[SFQ_MAX_DEPTH + 1];
|
||||
/* Linked lists of slots, indexed by depth
|
||||
* dep[0] : list of unused flows
|
||||
@@ -133,6 +140,7 @@ struct sfq_sched_data {
|
||||
* dep[X] : list of flows with X packets
|
||||
*/
|
||||
|
||||
unsigned int maxflows; /* number of flows in flows array */
|
||||
int perturb_period;
|
||||
unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
|
||||
struct timer_list perturb_timer;
|
||||
@@ -321,6 +329,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
|
||||
drop:
|
||||
skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
|
||||
len = qdisc_pkt_len(skb);
|
||||
slot->backlog -= len;
|
||||
sfq_dec(q, x);
|
||||
kfree_skb(skb);
|
||||
sch->q.qlen--;
|
||||
@@ -341,6 +350,23 @@ drop:
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Is ECN parameter configured */
|
||||
static int sfq_prob_mark(const struct sfq_sched_data *q)
|
||||
{
|
||||
return q->flags & TC_RED_ECN;
|
||||
}
|
||||
|
||||
/* Should packets over max threshold just be marked */
|
||||
static int sfq_hard_mark(const struct sfq_sched_data *q)
|
||||
{
|
||||
return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
|
||||
}
|
||||
|
||||
static int sfq_headdrop(const struct sfq_sched_data *q)
|
||||
{
|
||||
return q->headdrop;
|
||||
}
|
||||
|
||||
static int
|
||||
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
||||
{
|
||||
@@ -349,6 +375,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
||||
sfq_index x, qlen;
|
||||
struct sfq_slot *slot;
|
||||
int uninitialized_var(ret);
|
||||
struct sk_buff *head;
|
||||
int delta;
|
||||
|
||||
hash = sfq_classify(skb, sch, &ret);
|
||||
if (hash == 0) {
|
||||
@@ -368,24 +396,75 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
||||
q->ht[hash] = x;
|
||||
slot = &q->slots[x];
|
||||
slot->hash = hash;
|
||||
slot->backlog = 0; /* should already be 0 anyway... */
|
||||
red_set_vars(&slot->vars);
|
||||
goto enqueue;
|
||||
}
|
||||
if (q->red_parms) {
|
||||
slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
|
||||
&slot->vars,
|
||||
slot->backlog);
|
||||
switch (red_action(q->red_parms,
|
||||
&slot->vars,
|
||||
slot->vars.qavg)) {
|
||||
case RED_DONT_MARK:
|
||||
break;
|
||||
|
||||
case RED_PROB_MARK:
|
||||
sch->qstats.overlimits++;
|
||||
if (sfq_prob_mark(q)) {
|
||||
/* We know we have at least one packet in queue */
|
||||
if (sfq_headdrop(q) &&
|
||||
INET_ECN_set_ce(slot->skblist_next)) {
|
||||
q->stats.prob_mark_head++;
|
||||
break;
|
||||
}
|
||||
if (INET_ECN_set_ce(skb)) {
|
||||
q->stats.prob_mark++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
q->stats.prob_drop++;
|
||||
goto congestion_drop;
|
||||
|
||||
case RED_HARD_MARK:
|
||||
sch->qstats.overlimits++;
|
||||
if (sfq_hard_mark(q)) {
|
||||
/* We know we have at least one packet in queue */
|
||||
if (sfq_headdrop(q) &&
|
||||
INET_ECN_set_ce(slot->skblist_next)) {
|
||||
q->stats.forced_mark_head++;
|
||||
break;
|
||||
}
|
||||
if (INET_ECN_set_ce(skb)) {
|
||||
q->stats.forced_mark++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
q->stats.forced_drop++;
|
||||
goto congestion_drop;
|
||||
}
|
||||
}
|
||||
|
||||
if (slot->qlen >= q->maxdepth) {
|
||||
struct sk_buff *head;
|
||||
|
||||
if (!q->headdrop)
|
||||
congestion_drop:
|
||||
if (!sfq_headdrop(q))
|
||||
return qdisc_drop(skb, sch);
|
||||
|
||||
/* We know we have at least one packet in queue */
|
||||
head = slot_dequeue_head(slot);
|
||||
sch->qstats.backlog -= qdisc_pkt_len(head);
|
||||
delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
|
||||
sch->qstats.backlog -= delta;
|
||||
slot->backlog -= delta;
|
||||
qdisc_drop(head, sch);
|
||||
|
||||
sch->qstats.backlog += qdisc_pkt_len(skb);
|
||||
slot_queue_add(slot, skb);
|
||||
return NET_XMIT_CN;
|
||||
}
|
||||
|
||||
enqueue:
|
||||
sch->qstats.backlog += qdisc_pkt_len(skb);
|
||||
slot->backlog += qdisc_pkt_len(skb);
|
||||
slot_queue_add(slot, skb);
|
||||
sfq_inc(q, x);
|
||||
if (slot->qlen == 1) { /* The flow is new */
|
||||
@@ -396,6 +475,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
|
||||
slot->next = q->tail->next;
|
||||
q->tail->next = x;
|
||||
}
|
||||
/* We could use a bigger initial quantum for new flows */
|
||||
slot->allot = q->scaled_quantum;
|
||||
}
|
||||
if (++sch->q.qlen <= q->limit)
|
||||
@@ -439,7 +519,7 @@ next_slot:
|
||||
qdisc_bstats_update(sch, skb);
|
||||
sch->q.qlen--;
|
||||
sch->qstats.backlog -= qdisc_pkt_len(skb);
|
||||
|
||||
slot->backlog -= qdisc_pkt_len(skb);
|
||||
/* Is the slot empty? */
|
||||
if (slot->qlen == 0) {
|
||||
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
|
||||
@@ -490,6 +570,8 @@ static void sfq_rehash(struct Qdisc *sch)
|
||||
sfq_dec(q, i);
|
||||
__skb_queue_tail(&list, skb);
|
||||
}
|
||||
slot->backlog = 0;
|
||||
red_set_vars(&slot->vars);
|
||||
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
|
||||
}
|
||||
q->tail = NULL;
|
||||
@@ -514,6 +596,11 @@ drop: sch->qstats.backlog -= qdisc_pkt_len(skb);
|
||||
if (slot->qlen >= q->maxdepth)
|
||||
goto drop;
|
||||
slot_queue_add(slot, skb);
|
||||
if (q->red_parms)
|
||||
slot->vars.qavg = red_calc_qavg(q->red_parms,
|
||||
&slot->vars,
|
||||
slot->backlog);
|
||||
slot->backlog += qdisc_pkt_len(skb);
|
||||
sfq_inc(q, x);
|
||||
if (slot->qlen == 1) { /* The flow is new */
|
||||
if (q->tail == NULL) { /* It is the first flow */
|
||||
@@ -552,6 +639,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
|
||||
struct tc_sfq_qopt *ctl = nla_data(opt);
|
||||
struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
|
||||
unsigned int qlen;
|
||||
struct red_parms *p = NULL;
|
||||
|
||||
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
|
||||
return -EINVAL;
|
||||
@@ -560,7 +648,11 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
|
||||
if (ctl->divisor &&
|
||||
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
|
||||
return -EINVAL;
|
||||
|
||||
if (ctl_v1 && ctl_v1->qth_min) {
|
||||
p = kmalloc(sizeof(*p), GFP_KERNEL);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
}
|
||||
sch_tree_lock(sch);
|
||||
if (ctl->quantum) {
|
||||
q->quantum = ctl->quantum;
|
||||
@@ -576,6 +668,16 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
|
||||
if (ctl_v1) {
|
||||
if (ctl_v1->depth)
|
||||
q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
|
||||
if (p) {
|
||||
swap(q->red_parms, p);
|
||||
red_set_parms(q->red_parms,
|
||||
ctl_v1->qth_min, ctl_v1->qth_max,
|
||||
ctl_v1->Wlog,
|
||||
ctl_v1->Plog, ctl_v1->Scell_log,
|
||||
NULL,
|
||||
ctl_v1->max_P);
|
||||
}
|
||||
q->flags = ctl_v1->flags;
|
||||
q->headdrop = ctl_v1->headdrop;
|
||||
}
|
||||
if (ctl->limit) {
|
||||
@@ -594,6 +696,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
|
||||
q->perturbation = net_random();
|
||||
}
|
||||
sch_tree_unlock(sch);
|
||||
kfree(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -625,6 +728,7 @@ static void sfq_destroy(struct Qdisc *sch)
|
||||
del_timer_sync(&q->perturb_timer);
|
||||
sfq_free(q->ht);
|
||||
sfq_free(q->slots);
|
||||
kfree(q->red_parms);
|
||||
}
|
||||
|
||||
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
|
||||
@@ -683,6 +787,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
|
||||
struct sfq_sched_data *q = qdisc_priv(sch);
|
||||
unsigned char *b = skb_tail_pointer(skb);
|
||||
struct tc_sfq_qopt_v1 opt;
|
||||
struct red_parms *p = q->red_parms;
|
||||
|
||||
memset(&opt, 0, sizeof(opt));
|
||||
opt.v0.quantum = q->quantum;
|
||||
@@ -693,6 +798,17 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
|
||||
opt.depth = q->maxdepth;
|
||||
opt.headdrop = q->headdrop;
|
||||
|
||||
if (p) {
|
||||
opt.qth_min = p->qth_min >> p->Wlog;
|
||||
opt.qth_max = p->qth_max >> p->Wlog;
|
||||
opt.Wlog = p->Wlog;
|
||||
opt.Plog = p->Plog;
|
||||
opt.Scell_log = p->Scell_log;
|
||||
opt.max_P = p->max_P;
|
||||
}
|
||||
memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
|
||||
opt.flags = q->flags;
|
||||
|
||||
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
|
||||
|
||||
return skb->len;
|
||||
@@ -747,15 +863,13 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
|
||||
sfq_index idx = q->ht[cl - 1];
|
||||
struct gnet_stats_queue qs = { 0 };
|
||||
struct tc_sfq_xstats xstats = { 0 };
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (idx != SFQ_EMPTY_SLOT) {
|
||||
const struct sfq_slot *slot = &q->slots[idx];
|
||||
|
||||
xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
|
||||
qs.qlen = slot->qlen;
|
||||
slot_queue_walk(slot, skb)
|
||||
qs.backlog += qdisc_pkt_len(skb);
|
||||
qs.backlog = slot->backlog;
|
||||
}
|
||||
if (gnet_stats_copy_queue(d, &qs) < 0)
|
||||
return -1;
|
||||
|
@@ -2492,7 +2492,7 @@ int sock_register(const struct net_proto_family *ops)
|
||||
lockdep_is_held(&net_family_lock)))
|
||||
err = -EEXIST;
|
||||
else {
|
||||
RCU_INIT_POINTER(net_families[ops->family], ops);
|
||||
rcu_assign_pointer(net_families[ops->family], ops);
|
||||
err = 0;
|
||||
}
|
||||
spin_unlock(&net_family_lock);
|
||||
|
@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
|
||||
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
|
||||
return;
|
||||
gss_get_ctx(ctx);
|
||||
RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
|
||||
rcu_assign_pointer(gss_cred->gc_ctx, ctx);
|
||||
set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
|
||||
smp_mb__before_clear_bit();
|
||||
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
|
||||
|
@@ -2250,6 +2250,7 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
|
||||
};
|
||||
|
||||
static int parse_station_flags(struct genl_info *info,
|
||||
enum nl80211_iftype iftype,
|
||||
struct station_parameters *params)
|
||||
{
|
||||
struct nlattr *flags[NL80211_STA_FLAG_MAX + 1];
|
||||
@@ -2283,8 +2284,33 @@ static int parse_station_flags(struct genl_info *info,
|
||||
nla, sta_flags_policy))
|
||||
return -EINVAL;
|
||||
|
||||
params->sta_flags_mask = (1 << __NL80211_STA_FLAG_AFTER_LAST) - 1;
|
||||
params->sta_flags_mask &= ~1;
|
||||
/*
|
||||
* Only allow certain flags for interface types so that
|
||||
* other attributes are silently ignored. Remember that
|
||||
* this is backward compatibility code with old userspace
|
||||
* and shouldn't be hit in other cases anyway.
|
||||
*/
|
||||
switch (iftype) {
|
||||
case NL80211_IFTYPE_AP:
|
||||
case NL80211_IFTYPE_AP_VLAN:
|
||||
case NL80211_IFTYPE_P2P_GO:
|
||||
params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
|
||||
BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
|
||||
BIT(NL80211_STA_FLAG_WME) |
|
||||
BIT(NL80211_STA_FLAG_MFP);
|
||||
break;
|
||||
case NL80211_IFTYPE_P2P_CLIENT:
|
||||
case NL80211_IFTYPE_STATION:
|
||||
params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
|
||||
BIT(NL80211_STA_FLAG_TDLS_PEER);
|
||||
break;
|
||||
case NL80211_IFTYPE_MESH_POINT:
|
||||
params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
|
||||
BIT(NL80211_STA_FLAG_MFP) |
|
||||
BIT(NL80211_STA_FLAG_AUTHORIZED);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
|
||||
if (flags[flag])
|
||||
@@ -2585,7 +2611,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
|
||||
if (!rdev->ops->change_station)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (parse_station_flags(info, ¶ms))
|
||||
if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms))
|
||||
return -EINVAL;
|
||||
|
||||
if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
|
||||
@@ -2731,7 +2757,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
|
||||
if (!rdev->ops->add_station)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (parse_station_flags(info, ¶ms))
|
||||
if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms))
|
||||
return -EINVAL;
|
||||
|
||||
switch (dev->ieee80211_ptr->iftype) {
|
||||
|
@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
rcu_assign_pointer(net->xfrm.nlsk, nlsk);
return 0;
}