Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes. For example, David Ahern's adjacency list revamp in 'net-next' conflicted with an adjacency list traversal bug fix in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -663,7 +663,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*vhdr));
 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -652,7 +652,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 			batadv_softif_destroy_sysfs(hard_iface->soft_iface);
 	}
 
-	hard_iface->soft_iface = NULL;
 	batadv_hardif_put(hard_iface);
 
 out:
@@ -63,7 +63,7 @@ enum batadv_dbg_level {
 	BATADV_DBG_NC = BIT(5),
 	BATADV_DBG_MCAST = BIT(6),
 	BATADV_DBG_TP_METER = BIT(7),
-	BATADV_DBG_ALL = 127,
+	BATADV_DBG_ALL = 255,
 };
 
 #ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -544,7 +544,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
 	if (bat_priv->algo_ops->neigh.hardif_init)
 		bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
 
-	hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+	hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
 
 out:
 	spin_unlock_bh(&hard_iface->neigh_list_lock);
@@ -969,41 +969,38 @@ void __hci_req_enable_advertising(struct hci_request *req)
 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 }
 
-static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
 {
-	size_t complete_len;
 	size_t short_len;
-	int max_len;
+	size_t complete_len;
 
-	max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+	/* no space left for name (+ NULL + type + len) */
+	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
+		return ad_len;
+
+	/* use complete name if present and fits */
 	complete_len = strlen(hdev->dev_name);
-	short_len = strlen(hdev->short_name);
-
-	/* no space left for name */
-	if (max_len < 1)
-		return ad_len;
-
-	/* no name set */
-	if (!complete_len)
-		return ad_len;
-
-	/* complete name fits and is eq to max short name len or smaller */
-	if (complete_len <= max_len &&
-	    complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
+	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
-				       hdev->dev_name, complete_len);
-	}
+				       hdev->dev_name, complete_len + 1);
 
-	/* short name set and fits */
-	if (short_len && short_len <= max_len) {
+	/* use short name if present */
+	short_len = strlen(hdev->short_name);
+	if (short_len)
 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
-				       hdev->short_name, short_len);
-	}
+				       hdev->short_name, short_len + 1);
 
-	/* no short name set so shorten complete name */
-	if (!short_len) {
-		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
-				       hdev->dev_name, max_len);
+	/* use shortened full name if present, we already know that name
+	 * is longer then HCI_MAX_SHORT_NAME_LENGTH
+	 */
+	if (complete_len) {
+		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+
+		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
+		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+
+		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
+				       sizeof(name));
 	}
 
 	return ad_len;
@@ -106,6 +106,8 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
 void hci_request_setup(struct hci_dev *hdev);
 void hci_request_cancel_all(struct hci_dev *hdev);
 
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
+
 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
 				  u8 *data, u8 data_len)
 {
@@ -6017,7 +6017,15 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
 	return err;
 }
 
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
+static u8 calculate_name_len(struct hci_dev *hdev)
 {
+	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+
+	return append_local_name(hdev, buf, 0);
+}
+
+static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
+			   bool is_adv_data)
+{
 	u8 max_len = HCI_MAX_AD_LENGTH;
 
@@ -6030,9 +6038,8 @@ static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
 			max_len -= 3;
 	} else {
-		/* at least 1 byte of name should fit in */
 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
-			max_len -= 3;
+			max_len -= calculate_name_len(hdev);
 
 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
 			max_len -= 4;
@@ -6063,12 +6070,13 @@ static bool appearance_managed(u32 adv_flags)
 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
 }
 
-static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+			      u8 len, bool is_adv_data)
 {
 	int i, cur_len;
 	u8 max_len;
 
-	max_len = tlv_data_max_len(adv_flags, is_adv_data);
+	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
 
 	if (len > max_len)
 		return false;
@@ -6215,8 +6223,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
 		goto unlock;
 	}
 
-	if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
-	    !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
+	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
 			       cp->scan_rsp_len, false)) {
 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
 				      MGMT_STATUS_INVALID_PARAMS);
@@ -6429,8 +6437,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
 
 	rp.instance = cp->instance;
 	rp.flags = cp->flags;
-	rp.max_adv_data_len = tlv_data_max_len(flags, true);
-	rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
+	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
 
 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
 	mod_timer(&query->timer, jiffies);
 }
 
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
 {
 	struct net_bridge *br = port->br;
 
-	spin_lock(&br->multicast_lock);
 	if (br->multicast_disabled || !netif_running(br->dev))
-		goto out;
+		return;
 
 	br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
 	    hlist_unhashed(&port->rlist))
 		br_multicast_add_router(br, port);
+}
 
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	__br_multicast_enable_port(port);
 	spin_unlock(&br->multicast_lock);
 }
 
@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
-	int err = 0;
 	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_port *port;
+	int err = 0;
 
 	spin_lock_bh(&br->multicast_lock);
 	if (br->multicast_disabled == !val)
@@ -2023,10 +2029,9 @@ rollback:
 			goto rollback;
 	}
 
-	br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
-	br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+	br_multicast_open(br);
+	list_for_each_entry(port, &br->port_list, list)
+		__br_multicast_enable_port(port);
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
 	while (got < num_pages) {
 		rc = get_user_pages_unlocked(
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-		    num_pages - got, write_page, 0, pages + got);
+		    num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
 		if (rc < 0)
 			break;
 		BUG_ON(rc == 0);
@@ -2899,6 +2899,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 	}
 	return head;
 }
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -3709,7 +3710,7 @@ int netif_rx_ni(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_rx_ni);
 
-static void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
@@ -4375,6 +4376,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	NAPI_GRO_CB(skb)->flush = 0;
 	NAPI_GRO_CB(skb)->free = 0;
 	NAPI_GRO_CB(skb)->encap_mark = 0;
+	NAPI_GRO_CB(skb)->recursion_counter = 0;
 	NAPI_GRO_CB(skb)->is_fou = 0;
 	NAPI_GRO_CB(skb)->is_atomic = 1;
 	NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
@@ -5062,7 +5064,7 @@ out_unlock:
 	return work;
 }
 
-static void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;
@@ -246,15 +246,13 @@ ipv6:
 	case htons(ETH_P_8021AD):
 	case htons(ETH_P_8021Q): {
 		const struct vlan_hdr *vlan;
+		struct vlan_hdr _vlan;
+		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
 
-		if (skb_vlan_tag_present(skb))
+		if (vlan_tag_present)
 			proto = skb->protocol;
 
-		if (!skb_vlan_tag_present(skb) ||
-		    proto == cpu_to_be16(ETH_P_8021Q) ||
-		    proto == cpu_to_be16(ETH_P_8021AD)) {
-			struct vlan_hdr _vlan;
-
+		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
 			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
 						    data, hlen, &_vlan);
 			if (!vlan)
@@ -272,7 +270,7 @@ ipv6:
 					 FLOW_DISSECTOR_KEY_VLAN,
 					 target_container);
 
-		if (skb_vlan_tag_present(skb)) {
+		if (vlan_tag_present) {
 			key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
 			key_vlan->vlan_priority =
 				(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
@@ -215,13 +215,14 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
+	unsigned long flags;
 	bool alloc;
 	int id;
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	alloc = atomic_read(&peer->count) == 0 ? false : true;
 	id = __peernet2id_alloc(net, peer, &alloc);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
 	return id;
@@ -230,11 +231,12 @@ int peernet2id_alloc(struct net *net, struct net *peer)
 /* This function returns, if assigned, the id of a peer netns. */
 int peernet2id(struct net *net, struct net *peer)
 {
+	unsigned long flags;
 	int id;
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	id = __peernet2id(net, peer);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	return id;
 }
 EXPORT_SYMBOL(peernet2id);
@@ -249,17 +251,18 @@ bool peernet_has_id(struct net *net, struct net *peer)
 
 struct net *get_net_ns_by_id(struct net *net, int id)
 {
+	unsigned long flags;
 	struct net *peer;
 
 	if (id < 0)
 		return NULL;
 
 	rcu_read_lock();
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
 		get_net(peer);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	rcu_read_unlock();
 
 	return peer;
@@ -429,17 +432,17 @@ static void cleanup_net(struct work_struct *work)
 		for_each_net(tmp) {
 			int id;
 
-			spin_lock_bh(&tmp->nsid_lock);
+			spin_lock_irq(&tmp->nsid_lock);
 			id = __peernet2id(tmp, net);
 			if (id >= 0)
 				idr_remove(&tmp->netns_ids, id);
-			spin_unlock_bh(&tmp->nsid_lock);
+			spin_unlock_irq(&tmp->nsid_lock);
 			if (id >= 0)
 				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
 		}
-		spin_lock_bh(&net->nsid_lock);
+		spin_lock_irq(&net->nsid_lock);
 		idr_destroy(&net->netns_ids);
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock_irq(&net->nsid_lock);
 
 	}
 	rtnl_unlock();
@@ -568,6 +571,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tb[NETNSA_MAX + 1];
+	unsigned long flags;
 	struct net *peer;
 	int nsid, err;
 
@@ -588,15 +592,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (IS_ERR(peer))
 		return PTR_ERR(peer);
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	if (__peernet2id(net, peer) >= 0) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock_irqrestore(&net->nsid_lock, flags);
 		err = -EEXIST;
 		goto out;
 	}
 
 	err = alloc_netid(net, peer, nsid);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err);
 		err = 0;
@@ -718,10 +722,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
 		.idx = 0,
 		.s_idx = cb->args[0],
 	};
+	unsigned long flags;
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock_irqsave(&net->nsid_lock, flags);
 	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock_irqrestore(&net->nsid_lock, flags);
 
 	cb->args[0] = net_cb.idx;
 	return skb->len;
@@ -216,8 +216,8 @@
 #define M_QUEUE_XMIT		2	/* Inject packet into qdisc */
 
 /* If lock -- protects updating of if_list */
-#define   if_lock(t)           spin_lock(&(t->if_lock));
-#define   if_unlock(t)           spin_unlock(&(t->if_lock));
+#define   if_lock(t)           mutex_lock(&(t->if_lock));
+#define   if_unlock(t)           mutex_unlock(&(t->if_lock));
 
 /* Used to help with determining the pkts on receive */
 #define PKTGEN_MAGIC 0xbe9be955
@@ -423,7 +423,7 @@ struct pktgen_net {
 };
 
 struct pktgen_thread {
-	spinlock_t if_lock;		/* for list of devices */
+	struct mutex if_lock;		/* for list of devices */
 	struct list_head if_list;	/* All device here */
 	struct list_head th_list;
 	struct task_struct *tsk;
@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 {
 	struct pktgen_thread *t;
 
+	mutex_lock(&pktgen_thread_lock);
+
 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
 		struct pktgen_dev *pkt_dev;
 
-		rcu_read_lock();
-		list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+		if_lock(t);
+		list_for_each_entry(pkt_dev, &t->if_list, list) {
 			if (pkt_dev->odev != dev)
 				continue;
 
@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 				 dev->name);
 			break;
 		}
-		rcu_read_unlock();
+		if_unlock(t);
 	}
+	mutex_unlock(&pktgen_thread_lock);
 }
 
 static int pktgen_device_event(struct notifier_block *unused,
@@ -3762,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 		return -ENOMEM;
 	}
 
-	spin_lock_init(&t->if_lock);
+	mutex_init(&t->if_lock);
 	t->cpu = cpu;
 
 	INIT_LIST_HEAD(&t->if_list);
@@ -129,7 +129,6 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
 
 	return 0;
 }
-EXPORT_SYMBOL(reuseport_add_sock);
 
 static void reuseport_free_rcu(struct rcu_head *head)
 {
@@ -440,7 +440,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -300,10 +300,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
 static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
 			     struct hsr_frame_info *frame)
 {
-	struct net_device *master_dev;
-
-	master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
-
 	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
 		frame->is_local_exclusive = true;
 		skb->pkt_type = PACKET_HOST;
@@ -1391,7 +1391,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -441,7 +441,7 @@ next_proto:
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 	flush = 0;
 
 out_unlock:
@@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
 
 out_unlock:
@@ -25,6 +25,7 @@
 #include <net/inet_hashtables.h>
 #include <net/secure_seq.h>
 #include <net/ip.h>
+#include <net/tcp.h>
 #include <net/sock_reuseport.h>
 
 static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
@@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port);
 
 static inline int compute_score(struct sock *sk, struct net *net,
 				const unsigned short hnum, const __be32 daddr,
-				const int dif)
+				const int dif, bool exact_dif)
 {
 	int score = -1;
 	struct inet_sock *inet = inet_sk(sk);
@@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
 				return -1;
 			score += 4;
 		}
-		if (sk->sk_bound_dev_if) {
+		if (sk->sk_bound_dev_if || exact_dif) {
 			if (sk->sk_bound_dev_if != dif)
 				return -1;
 			score += 4;
@@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net,
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
 	int score, hiscore = 0, matches = 0, reuseport = 0;
+	bool exact_dif = inet_exact_dif_match(net, skb);
 	struct sock *sk, *result = NULL;
 	u32 phash = 0;
 
 	sk_for_each_rcu(sk, &ilb->head) {
-		score = compute_score(sk, net, hnum, daddr, dif);
+		score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
 		if (score > hiscore) {
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
@@ -538,7 +538,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 {
 	struct iphdr *iph;
 	int ptr;
-	struct net_device *dev;
 	struct sk_buff *skb2;
 	unsigned int mtu, hlen, left, len, ll_rs;
 	int offset;
@@ -546,8 +545,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	struct rtable *rt = skb_rtable(skb);
 	int err = 0;
 
-	dev = rt->dst.dev;
-
 	/* for offloaded checksums cleanup checksum before fragmentation */
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    (err = skb_checksum_help(skb)))
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 }
 
 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
-				  int offset)
+				  int tlen, int offset)
 {
 	__wsum csum = skb->csum;
 
@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
 		return;
 
 	if (offset != 0)
-		csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
-						   offset, 0));
+		csum = csum_sub(csum,
+				csum_partial(skb_transport_header(skb) + tlen,
+					     offset, 0));
 
 	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 }
 
 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
-			 int offset)
+			 int tlen, int offset)
 {
 	struct inet_sock *inet = inet_sk(skb->sk);
 	unsigned int flags = inet->cmsg_flags;
@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
 	}
 
 	if (flags & IP_CMSG_CHECKSUM)
-		ip_cmsg_recv_checksum(msg, skb, offset);
+		ip_cmsg_recv_checksum(msg, skb, tlen, offset);
 }
 EXPORT_SYMBOL(ip_cmsg_recv_offset);
 
@@ -994,7 +994,7 @@ struct proto ping_prot = {
 	.init =		ping_init_sock,
 	.close =	ping_close,
 	.connect =	ip4_datagram_connect,
-	.disconnect =	udp_disconnect,
+	.disconnect =	__udp_disconnect,
 	.setsockopt =	ip_setsockopt,
 	.getsockopt =	ip_getsockopt,
 	.sendmsg =	ping_v4_sendmsg,
@@ -934,7 +934,7 @@ struct proto raw_prot = {
 	.close =	raw_close,
 	.destroy =	raw_destroy,
 	.connect =	ip4_datagram_connect,
-	.disconnect =	udp_disconnect,
+	.disconnect =	__udp_disconnect,
 	.ioctl =	raw_ioctl,
 	.init =		raw_init,
 	.setsockopt =	raw_setsockopt,
@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 		container_of(table->data, struct net, ipv4.ping_group_range.range);
 	unsigned int seq;
 	do {
-		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 	kgid_t *data = table->data;
 	struct net *net =
 		container_of(table->data, struct net, ipv4.ping_group_range.range);
-	write_seqlock(&net->ipv4.ip_local_ports.lock);
+	write_seqlock(&net->ipv4.ping_group_range.lock);
 	data[0] = low;
 	data[1] = high;
-	write_sequnlock(&net->ipv4.ip_local_ports.lock);
+	write_sequnlock(&net->ipv4.ping_group_range.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -86,7 +86,6 @@
 
 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);
 
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@ -1887,7 +1886,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
 	struct inet_listen_hashbucket *ilb;
-	struct inet_connection_sock *icsk;
 	struct sock *sk = cur;
 
 	if (!sk) {
@@ -1909,7 +1907,6 @@ get_sk:
 			continue;
 		if (sk->sk_family == st->family)
 			return sk;
-		icsk = inet_csk(sk);
 	}
 	spin_unlock(&ilb->lock);
 	st->offset = 0;
@@ -1420,7 +1420,7 @@ try_again:
 		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
-		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
+		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
 
 	err = copied;
 	if (flags & MSG_TRUNC)
@@ -1442,7 +1442,7 @@ csum_copy_err:
 	goto try_again;
 }
 
-int udp_disconnect(struct sock *sk, int flags)
+int __udp_disconnect(struct sock *sk, int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	/*
@@ -1464,6 +1464,15 @@ int udp_disconnect(struct sock *sk, int flags)
 	sk_dst_reset(sk);
 	return 0;
 }
+EXPORT_SYMBOL(__udp_disconnect);
+
+int udp_disconnect(struct sock *sk, int flags)
+{
+	lock_sock(sk);
+	__udp_disconnect(sk, flags);
+	release_sock(sk);
+	return 0;
+}
 EXPORT_SYMBOL(udp_disconnect);
 
 void udp_lib_unhash(struct sock *sk)
@@ -2272,7 +2281,7 @@ int udp_abort(struct sock *sk, int err)
 
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
-	udp_disconnect(sk, 0);
+	__udp_disconnect(sk, 0);
 
 	release_sock(sk);
 
@@ -295,7 +295,7 @@ unflush:
 
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-	pp = udp_sk(sk)->gro_receive(sk, head, skb);
+	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __ipv6_regen_rndid(struct inet6_dev *idev);
|
||||
static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
|
||||
static void ipv6_regen_rndid(unsigned long data);
|
||||
static void ipv6_regen_rndid(struct inet6_dev *idev);
|
||||
static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
|
||||
|
||||
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
|
||||
static int ipv6_count_addresses(struct inet6_dev *idev);
|
||||
@@ -409,9 +408,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
|
||||
goto err_release;
|
||||
}
|
||||
|
||||
/* One reference from device. We must do this before
|
||||
* we invoke __ipv6_regen_rndid().
|
||||
*/
|
||||
/* One reference from device. */
|
||||
in6_dev_hold(ndev);
|
||||
|
||||
if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
|
||||
@@ -425,17 +422,15 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
|
||||
#endif
|
||||
|
||||
INIT_LIST_HEAD(&ndev->tempaddr_list);
|
||||
setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
|
||||
ndev->desync_factor = U32_MAX;
|
||||
if ((dev->flags&IFF_LOOPBACK) ||
|
||||
dev->type == ARPHRD_TUNNEL ||
|
||||
dev->type == ARPHRD_TUNNEL6 ||
|
||||
dev->type == ARPHRD_SIT ||
|
||||
dev->type == ARPHRD_NONE) {
|
||||
ndev->cnf.use_tempaddr = -1;
|
||||
} else {
|
||||
in6_dev_hold(ndev);
|
||||
ipv6_regen_rndid((unsigned long) ndev);
|
||||
}
|
||||
} else
|
||||
ipv6_regen_rndid(ndev);
|
||||
|
||||
ndev->token = in6addr_any;
|
||||
|
||||
@@ -447,7 +442,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
|
||||
err = addrconf_sysctl_register(ndev);
|
||||
if (err) {
|
||||
ipv6_mc_destroy_dev(ndev);
|
||||
del_timer(&ndev->regen_timer);
|
||||
snmp6_unregister_dev(ndev);
|
||||
goto err_release;
|
||||
}
|
||||
@@ -1190,6 +1184,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
|
||||
int ret = 0;
|
||||
u32 addr_flags;
|
||||
unsigned long now = jiffies;
|
||||
long max_desync_factor;
|
||||
s32 cnf_temp_preferred_lft;
|
||||
|
||||
write_lock_bh(&idev->lock);
|
||||
if (ift) {
|
||||
@@ -1222,23 +1218,42 @@ retry:
|
||||
}
|
||||
in6_ifa_hold(ifp);
|
||||
memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
|
||||
__ipv6_try_regen_rndid(idev, tmpaddr);
|
||||
ipv6_try_regen_rndid(idev, tmpaddr);
|
||||
memcpy(&addr.s6_addr[8], idev->rndid, 8);
|
||||
age = (now - ifp->tstamp) / HZ;
|
||||
tmp_valid_lft = min_t(__u32,
|
||||
ifp->valid_lft,
|
||||
idev->cnf.temp_valid_lft + age);
|
||||
tmp_prefered_lft = min_t(__u32,
|
||||
ifp->prefered_lft,
|
||||
idev->cnf.temp_prefered_lft + age -
|
||||
idev->cnf.max_desync_factor);
|
||||
tmp_plen = ifp->prefix_len;
|
||||
tmp_tstamp = ifp->tstamp;
|
||||
spin_unlock_bh(&ifp->lock);
|
||||
|
||||
regen_advance = idev->cnf.regen_max_retry *
|
||||
idev->cnf.dad_transmits *
|
||||
NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
|
||||
|
||||
/* recalculate max_desync_factor each time and update
|
||||
* idev->desync_factor if it's larger
|
||||
*/
|
||||
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
|
||||
max_desync_factor = min_t(__u32,
|
||||
idev->cnf.max_desync_factor,
|
||||
cnf_temp_preferred_lft - regen_advance);
|
||||
|
||||
if (unlikely(idev->desync_factor > max_desync_factor)) {
|
||||
if (max_desync_factor > 0) {
|
||||
get_random_bytes(&idev->desync_factor,
|
||||
sizeof(idev->desync_factor));
|
||||
idev->desync_factor %= max_desync_factor;
|
||||
} else {
|
||||
idev->desync_factor = 0;
|
||||
}
|
||||
}
|
||||
|
||||
tmp_valid_lft = min_t(__u32,
|
||||
ifp->valid_lft,
|
||||
idev->cnf.temp_valid_lft + age);
|
||||
tmp_prefered_lft = cnf_temp_preferred_lft + age -
|
||||
idev->desync_factor;
|
||||
tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
|
||||
tmp_plen = ifp->prefix_len;
|
||||
tmp_tstamp = ifp->tstamp;
|
||||
spin_unlock_bh(&ifp->lock);
|
||||
|
||||
write_unlock_bh(&idev->lock);
|
||||
|
||||
/* A temporary address is created only if this calculated Preferred
|
||||
@@ -2150,7 +2165,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
|
||||
}
|
||||
|
||||
/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
|
||||
static void __ipv6_regen_rndid(struct inet6_dev *idev)
|
||||
static void ipv6_regen_rndid(struct inet6_dev *idev)
|
||||
{
|
||||
regen:
|
||||
get_random_bytes(idev->rndid, sizeof(idev->rndid));
|
||||
@@ -2179,43 +2194,10 @@ regen:
|
||||
}
|
||||
}
|
||||
|
||||
static void ipv6_regen_rndid(unsigned long data)
|
||||
{
|
||||
struct inet6_dev *idev = (struct inet6_dev *) data;
|
||||
unsigned long expires;
|
||||
|
||||
rcu_read_lock_bh();
|
||||
write_lock_bh(&idev->lock);
|
||||
|
||||
if (idev->dead)
|
||||
goto out;
|
||||
|
||||
__ipv6_regen_rndid(idev);
|
||||
|
||||
expires = jiffies +
|
||||
idev->cnf.temp_prefered_lft * HZ -
|
||||
idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
|
||||
NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
|
||||
idev->cnf.max_desync_factor * HZ;
|
||||
if (time_before(expires, jiffies)) {
|
||||
pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
|
||||
__func__, idev->dev->name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!mod_timer(&idev->regen_timer, expires))
|
||||
in6_dev_hold(idev);
|
||||
|
||||
out:
|
||||
write_unlock_bh(&idev->lock);
|
||||
rcu_read_unlock_bh();
|
||||
in6_dev_put(idev);
|
||||
}
|
||||
|
||||
static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
|
||||
static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
|
||||
{
|
||||
if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
|
||||
__ipv6_regen_rndid(idev);
|
||||
ipv6_regen_rndid(idev);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2356,7 +2338,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
|
||||
max_valid = 0;
|
||||
|
||||
max_prefered = idev->cnf.temp_prefered_lft -
|
||||
idev->cnf.max_desync_factor - age;
|
||||
idev->desync_factor - age;
|
||||
if (max_prefered < 0)
|
||||
max_prefered = 0;
|
||||
|
||||
@@ -3018,7 +3000,7 @@ static void init_loopback(struct net_device *dev)
|
||||
* lo device down, release this obsolete dst and
|
||||
* reallocate a new router for ifa.
|
||||
*/
|
||||
if (sp_ifa->rt->dst.obsolete > 0) {
|
||||
if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
|
||||
ip6_rt_put(sp_ifa->rt);
|
||||
sp_ifa->rt = NULL;
|
||||
} else {
|
||||
@@ -3594,9 +3576,6 @@ restart:
|
||||
if (!how)
|
||||
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
|
||||
|
||||
if (how && del_timer(&idev->regen_timer))
|
||||
in6_dev_put(idev);
|
||||
|
||||
/* Step 3: clear tempaddr list */
|
||||
while (!list_empty(&idev->tempaddr_list)) {
|
||||
ifa = list_first_entry(&idev->tempaddr_list,
|
||||
|
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established);
 static inline int compute_score(struct sock *sk, struct net *net,
 				const unsigned short hnum,
 				const struct in6_addr *daddr,
-				const int dif)
+				const int dif, bool exact_dif)
 {
 	int score = -1;
 
@@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
 				return -1;
 			score++;
 		}
-		if (sk->sk_bound_dev_if) {
+		if (sk->sk_bound_dev_if || exact_dif) {
 			if (sk->sk_bound_dev_if != dif)
 				return -1;
 			score++;
@@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net,
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
 	int score, hiscore = 0, matches = 0, reuseport = 0;
+	bool exact_dif = inet6_exact_dif_match(net, skb);
 	struct sock *sk, *result = NULL;
 	u32 phash = 0;
 
 	sk_for_each(sk, &ilb->head) {
-		score = compute_score(sk, net, hnum, daddr, dif);
+		score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
 		if (score > hiscore) {
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
@@ -263,13 +264,15 @@ EXPORT_SYMBOL_GPL(inet6_hash_connect);
 
 int inet6_hash(struct sock *sk)
 {
+	int err = 0;
+
 	if (sk->sk_state != TCP_CLOSE) {
 		local_bh_disable();
-		__inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+		err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
 		local_bh_enable();
 	}
 
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL_GPL(inet6_hash);
 
@@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -157,6 +157,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 	hash = HASH(&any, local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    ipv6_addr_any(&t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
@@ -164,6 +165,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 	hash = HASH(remote, &any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    ipv6_addr_any(&t->parms.laddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
@@ -1170,6 +1172,7 @@ route_lookup:
 	if (err)
 		return err;
 
+	skb->protocol = htons(ETH_P_IPV6);
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
|
||||
static bool setsockopt_needs_rtnl(int optname)
|
||||
{
|
||||
switch (optname) {
|
||||
case IPV6_ADDRFORM:
|
||||
case IPV6_ADD_MEMBERSHIP:
|
||||
case IPV6_DROP_MEMBERSHIP:
|
||||
case IPV6_JOIN_ANYCAST:
|
||||
@@ -198,7 +199,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
|
||||
}
|
||||
|
||||
fl6_free_socklist(sk);
|
||||
ipv6_sock_mc_close(sk);
|
||||
__ipv6_sock_mc_close(sk);
|
||||
|
||||
/*
|
||||
* Sock is moving from IPv6 to IPv4 (sk_prot), so
|
||||
|
@@ -276,16 +276,14 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
 	return idev;
 }
 
-void ipv6_sock_mc_close(struct sock *sk)
+void __ipv6_sock_mc_close(struct sock *sk)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct ipv6_mc_socklist *mc_lst;
 	struct net *net = sock_net(sk);
 
-	if (!rcu_access_pointer(np->ipv6_mc_list))
-		return;
+	ASSERT_RTNL();
 
-	rtnl_lock();
 	while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
 		struct net_device *dev;
 
@@ -303,8 +301,17 @@ void ipv6_sock_mc_close(struct sock *sk)
 
 		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
 		kfree_rcu(mc_lst, rcu);
 
 	}
+}
+
+void ipv6_sock_mc_close(struct sock *sk)
+{
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	if (!rcu_access_pointer(np->ipv6_mc_list))
+		return;
+	rtnl_lock();
+	__ipv6_sock_mc_close(sk);
 	rtnl_unlock();
 }
 
@@ -180,7 +180,7 @@ struct proto pingv6_prot = {
 	.init =		ping_init_sock,
 	.close =	ping_close,
 	.connect =	ip6_datagram_connect_v6_only,
-	.disconnect =	udp_disconnect,
+	.disconnect =	__udp_disconnect,
 	.setsockopt =	ipv6_setsockopt,
 	.getsockopt =	ipv6_getsockopt,
 	.sendmsg =	ping_v6_sendmsg,
@@ -1243,7 +1243,7 @@ struct proto rawv6_prot = {
 	.close =	rawv6_close,
 	.destroy =	raw6_destroy,
 	.connect =	ip6_datagram_connect_v6_only,
-	.disconnect =	udp_disconnect,
+	.disconnect =	__udp_disconnect,
 	.ioctl =	rawv6_ioctl,
 	.init =		rawv6_init_sk,
 	.setsockopt =	rawv6_setsockopt,
@@ -456,7 +456,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
 	memmove(head->head + sizeof(struct frag_hdr), head->head,
 		(head->data - head->head) - sizeof(struct frag_hdr));
-	head->mac_header += sizeof(struct frag_hdr);
+	if (skb_mac_header_was_set(head))
+		head->mac_header += sizeof(struct frag_hdr);
 	head->network_header += sizeof(struct frag_hdr);
 
 	skb_reset_transport_header(head);
@@ -102,11 +102,13 @@ static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
 #ifdef CONFIG_IPV6_ROUTE_INFO
 static struct rt6_info *rt6_add_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex,
+					   const struct in6_addr *gwaddr,
+					   struct net_device *dev,
 					   unsigned int pref);
 static struct rt6_info *rt6_get_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex);
+					   const struct in6_addr *gwaddr,
+					   struct net_device *dev);
 #endif
 
 struct uncached_list {
@@ -656,7 +658,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
 		struct net_device *dev = rt->dst.dev;
 
 		if (dev && !netif_carrier_ok(dev) &&
-		    idev->cnf.ignore_routes_with_linkdown)
+		    idev->cnf.ignore_routes_with_linkdown &&
+		    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
 			goto out;
 
 	if (rt6_check_expired(rt))
@@ -803,7 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 		rt = rt6_get_dflt_router(gwaddr, dev);
 	else
 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
-					gwaddr, dev->ifindex);
+					gwaddr, dev);
 
 	if (rt && !lifetime) {
 		ip6_del_rt(rt);
@@ -811,8 +814,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 	}
 
 	if (!rt && lifetime)
-		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
-					pref);
+		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+					dev, pref);
 	else if (rt)
 		rt->rt6i_flags = RTF_ROUTEINFO |
 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1050,6 +1053,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
 	int strict = 0;
 
 	strict |= flags & RT6_LOOKUP_F_IFACE;
+	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
 	if (net->ipv6.devconf_all->forwarding == 0)
 		strict |= RT6_LOOKUP_F_REACHABLE;
 
@@ -1789,7 +1793,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
 	};
 	struct fib6_table *table;
 	struct rt6_info *rt;
-	int flags = RT6_LOOKUP_F_IFACE;
+	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
 
 	table = fib6_get_table(net, cfg->fc_table);
 	if (!table)
@@ -2325,13 +2329,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
 #ifdef CONFIG_IPV6_ROUTE_INFO
 static struct rt6_info *rt6_get_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex)
+					   const struct in6_addr *gwaddr,
+					   struct net_device *dev)
 {
+	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+	int ifindex = dev->ifindex;
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
 
-	table = fib6_get_table(net, RT6_TABLE_INFO);
+	table = fib6_get_table(net, tb_id);
 	if (!table)
 		return NULL;
 
@@ -2357,12 +2364,13 @@ out:
 
 static struct rt6_info *rt6_add_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr, int ifindex,
+					   const struct in6_addr *gwaddr,
+					   struct net_device *dev,
 					   unsigned int pref)
 {
 	struct fib6_config cfg = {
 		.fc_metric	= IP6_RT_PRIO_USER,
-		.fc_ifindex	= ifindex,
+		.fc_ifindex	= dev->ifindex,
 		.fc_dst_len	= prefixlen,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
 				  RTF_UP | RTF_PREF(pref),
@@ -2371,7 +2379,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
 		.fc_nlinfo.nl_net = net,
 	};
 
-	cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
+	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
 	cfg.fc_dst = *prefix;
 	cfg.fc_gateway = *gwaddr;
 
@@ -2381,16 +2389,17 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
 
 	ip6_route_add(&cfg);
 
-	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
 }
 #endif
 
 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
 {
+	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
 	struct rt6_info *rt;
 	struct fib6_table *table;
 
-	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+	table = fib6_get_table(dev_net(dev), tb_id);
 	if (!table)
 		return NULL;
 
@@ -2424,20 +2433,20 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
 
 	cfg.fc_gateway = *gwaddr;
 
-	ip6_route_add(&cfg);
+	if (!ip6_route_add(&cfg)) {
+		struct fib6_table *table;
+
+		table = fib6_get_table(dev_net(dev), cfg.fc_table);
+		if (table)
+			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
+	}
 
 	return rt6_get_dflt_router(gwaddr, dev);
 }
 
-void rt6_purge_dflt_routers(struct net *net)
+static void __rt6_purge_dflt_routers(struct fib6_table *table)
 {
 	struct rt6_info *rt;
-	struct fib6_table *table;
 
-	/* NOTE: Keep consistent with rt6_get_dflt_router */
-	table = fib6_get_table(net, RT6_TABLE_DFLT);
-	if (!table)
-		return;
-
 restart:
 	read_lock_bh(&table->tb6_lock);
@@ -2451,6 +2460,27 @@ restart:
 		}
 	}
 	read_unlock_bh(&table->tb6_lock);
+
+	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+}
+
+void rt6_purge_dflt_routers(struct net *net)
+{
+	struct fib6_table *table;
+	struct hlist_head *head;
+	unsigned int h;
+
+	rcu_read_lock();
+
+	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+		head = &net->ipv6.fib_table_hash[h];
+		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
+				__rt6_purge_dflt_routers(table);
+		}
+	}
+
+	rcu_read_unlock();
 }
 
 static void rtmsg_to_fib6_config(struct net *net,
@@ -425,7 +425,8 @@ try_again:
 
 	if (is_udp4) {
 		if (inet->cmsg_flags)
-			ip_cmsg_recv(msg, skb);
+			ip_cmsg_recv_offset(msg, skb,
+					    sizeof(struct udphdr), off);
 	} else {
 		if (np->rxopt.all)
 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
@@ -338,7 +338,7 @@ static int l2tp_ip_disconnect(struct sock *sk, int flags)
 	if (sock_flag(sk, SOCK_ZAPPED))
 		return 0;
 
-	return udp_disconnect(sk, flags);
+	return __udp_disconnect(sk, flags);
 }
 
 static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
@@ -410,7 +410,7 @@ static int l2tp_ip6_disconnect(struct sock *sk, int flags)
 	if (sock_flag(sk, SOCK_ZAPPED))
 		return 0;
 
-	return udp_disconnect(sk, flags);
+	return __udp_disconnect(sk, flags);
 }
 
 static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
@@ -18,21 +18,24 @@
 #include "key.h"
 #include "aes_ccm.h"
 
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-			       u8 *data, size_t data_len, u8 *mic,
-			       size_t mic_len)
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+			      u8 *data, size_t data_len, u8 *mic,
+			      size_t mic_len)
 {
 	struct scatterlist sg[3];
-	char aead_req_data[sizeof(struct aead_request) +
-			   crypto_aead_reqsize(tfm)]
-		__aligned(__alignof__(struct aead_request));
-	struct aead_request *aead_req = (void *) aead_req_data;
+	struct aead_request *aead_req;
+	int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+	u8 *__aad;
 
-	memset(aead_req, 0, sizeof(aead_req_data));
+	aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+	if (!aead_req)
+		return -ENOMEM;
+
+	__aad = (u8 *)aead_req + reqsize;
+	memcpy(__aad, aad, CCM_AAD_LEN);
 
 	sg_init_table(sg, 3);
-	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+	sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
 	sg_set_buf(&sg[1], data, data_len);
 	sg_set_buf(&sg[2], mic, mic_len);
 
@@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 	aead_request_set_ad(aead_req, sg[0].length);
 
 	crypto_aead_encrypt(aead_req);
+	kzfree(aead_req);
+
+	return 0;
 }
 
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
@@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 		size_t mic_len)
 {
 	struct scatterlist sg[3];
-	char aead_req_data[sizeof(struct aead_request) +
-			   crypto_aead_reqsize(tfm)]
-		__aligned(__alignof__(struct aead_request));
-	struct aead_request *aead_req = (void *) aead_req_data;
+	struct aead_request *aead_req;
+	int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+	u8 *__aad;
+	int err;
 
 	if (data_len == 0)
 		return -EINVAL;
 
-	memset(aead_req, 0, sizeof(aead_req_data));
+	aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+	if (!aead_req)
+		return -ENOMEM;
+
+	__aad = (u8 *)aead_req + reqsize;
+	memcpy(__aad, aad, CCM_AAD_LEN);
 
 	sg_init_table(sg, 3);
-	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+	sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
 	sg_set_buf(&sg[1], data, data_len);
 	sg_set_buf(&sg[2], mic, mic_len);
 
@@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 	aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
 	aead_request_set_ad(aead_req, sg[0].length);
 
-	return crypto_aead_decrypt(aead_req);
+	err = crypto_aead_decrypt(aead_req);
+	kzfree(aead_req);
+
+	return err;
 }
 
 struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
@@ -12,12 +12,14 @@
 
 #include <linux/crypto.h>
 
+#define CCM_AAD_LEN 32
+
 struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
 						    size_t key_len,
 						    size_t mic_len);
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
-			       u8 *data, size_t data_len, u8 *mic,
-			       size_t mic_len);
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+			      u8 *data, size_t data_len, u8 *mic,
+			      size_t mic_len);
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 			      u8 *data, size_t data_len, u8 *mic,
 			      size_t mic_len);
@@ -15,20 +15,23 @@
 #include "key.h"
 #include "aes_gcm.h"
 
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
-			       u8 *data, size_t data_len, u8 *mic)
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+			      u8 *data, size_t data_len, u8 *mic)
 {
 	struct scatterlist sg[3];
-	char aead_req_data[sizeof(struct aead_request) +
-			   crypto_aead_reqsize(tfm)]
-		__aligned(__alignof__(struct aead_request));
-	struct aead_request *aead_req = (void *)aead_req_data;
+	struct aead_request *aead_req;
+	int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+	u8 *__aad;
 
-	memset(aead_req, 0, sizeof(aead_req_data));
+	aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+	if (!aead_req)
+		return -ENOMEM;
+
+	__aad = (u8 *)aead_req + reqsize;
+	memcpy(__aad, aad, GCM_AAD_LEN);
 
 	sg_init_table(sg, 3);
-	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+	sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
 	sg_set_buf(&sg[1], data, data_len);
 	sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
 
@@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
 	aead_request_set_ad(aead_req, sg[0].length);
 
 	crypto_aead_encrypt(aead_req);
+	kzfree(aead_req);
+	return 0;
 }
 
 int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
 			      u8 *data, size_t data_len, u8 *mic)
 {
 	struct scatterlist sg[3];
-	char aead_req_data[sizeof(struct aead_request) +
-			   crypto_aead_reqsize(tfm)]
-		__aligned(__alignof__(struct aead_request));
-	struct aead_request *aead_req = (void *)aead_req_data;
+	struct aead_request *aead_req;
+	int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+	u8 *__aad;
+	int err;
 
 	if (data_len == 0)
 		return -EINVAL;
 
-	memset(aead_req, 0, sizeof(aead_req_data));
+	aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+	if (!aead_req)
+		return -ENOMEM;
+
+	__aad = (u8 *)aead_req + reqsize;
+	memcpy(__aad, aad, GCM_AAD_LEN);
 
 	sg_init_table(sg, 3);
-	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+	sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
 	sg_set_buf(&sg[1], data, data_len);
 	sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
 
@@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
 					data_len + IEEE80211_GCMP_MIC_LEN, j_0);
 	aead_request_set_ad(aead_req, sg[0].length);
 
-	return crypto_aead_decrypt(aead_req);
+	err = crypto_aead_decrypt(aead_req);
+	kzfree(aead_req);
+
+	return err;
 }
 
 struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
@@ -11,8 +11,10 @@
 
 #include <linux/crypto.h>
 
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
-			       u8 *data, size_t data_len, u8 *mic);
+#define GCM_AAD_LEN 32
+
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+			      u8 *data, size_t data_len, u8 *mic);
 int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
 			      u8 *data, size_t data_len, u8 *mic);
 struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
@@ -17,28 +17,27 @@
 #include "key.h"
 #include "aes_gmac.h"
 
-#define GMAC_MIC_LEN 16
-#define GMAC_NONCE_LEN 12
-#define AAD_LEN 20
-
 int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
 		       const u8 *data, size_t data_len, u8 *mic)
 {
 	struct scatterlist sg[4];
-	char aead_req_data[sizeof(struct aead_request) +
-			   crypto_aead_reqsize(tfm)]
-		__aligned(__alignof__(struct aead_request));
-	struct aead_request *aead_req = (void *)aead_req_data;
-	u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
+	u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
+	struct aead_request *aead_req;
+	int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
 
 	if (data_len < GMAC_MIC_LEN)
 		return -EINVAL;
 
-	memset(aead_req, 0, sizeof(aead_req_data));
+	aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC);
+	if (!aead_req)
+		return -ENOMEM;
+
+	zero = (u8 *)aead_req + reqsize;
+	__aad = zero + GMAC_MIC_LEN;
+	memcpy(__aad, aad, GMAC_AAD_LEN);
 
-	memset(zero, 0, GMAC_MIC_LEN);
 	sg_init_table(sg, 4);
-	sg_set_buf(&sg[0], aad, AAD_LEN);
+	sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
 	sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
 	sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
 	sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
@@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
 
 	aead_request_set_tfm(aead_req, tfm);
 	aead_request_set_crypt(aead_req, sg, sg, 0, iv);
-	aead_request_set_ad(aead_req, AAD_LEN + data_len);
+	aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
 
 	crypto_aead_encrypt(aead_req);
+	kzfree(aead_req);
 
 	return 0;
 }
@@ -11,6 +11,10 @@

#include <linux/crypto.h>

#define GMAC_AAD_LEN 20
#define GMAC_MIC_LEN 16
#define GMAC_NONCE_LEN 12

struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
size_t key_len);
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
@@ -820,7 +820,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
break;
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->da);
sta = sta_info_get_bss(sdata, mgmt->da);
rcu_read_unlock();
if (!sta)
return -ENOLINK;
@@ -2301,6 +2301,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
struct ethhdr ethhdr;
const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;

if (unlikely(!ieee80211_is_data(fc)))
return RX_CONTINUE;
@@ -2311,24 +2313,53 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;

if (ieee80211_has_a4(hdr->frame_control) &&
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
!rx->sdata->u.vlan.sta)
return RX_DROP_UNUSABLE;
if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
switch (rx->sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
if (!rx->sdata->u.vlan.sta)
return RX_DROP_UNUSABLE;
break;
case NL80211_IFTYPE_STATION:
if (!rx->sdata->u.mgd.use_4addr)
return RX_DROP_UNUSABLE;
break;
default:
return RX_DROP_UNUSABLE;
}
check_da = NULL;
check_sa = NULL;
} else switch (rx->sdata->vif.type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
check_da = NULL;
break;
case NL80211_IFTYPE_STATION:
if (!rx->sta ||
!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
check_sa = NULL;
break;
case NL80211_IFTYPE_MESH_POINT:
check_sa = NULL;
break;
default:
break;
}

if (is_multicast_ether_addr(hdr->addr1) &&
((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
rx->sdata->u.vlan.sta) ||
(rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
rx->sdata->u.mgd.use_4addr)))
if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;

skb->dev = dev;
__skb_queue_head_init(&frame_list);

if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
rx->sdata->vif.addr,
rx->sdata->vif.type))
return RX_DROP_UNUSABLE;

ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
rx->local->hw.extra_tx_headroom, true);
rx->local->hw.extra_tx_headroom,
check_da, check_sa);

while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
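What the new check_da/check_sa parameters buy, sketched in plain C (my illustration, not the mac80211 code; a NULL pointer means the corresponding address is not enforced, and a multicast DA is deliberately let through, matching the FIXME in the util.c hunk further below):

#include <stdbool.h>
#include <string.h>

static bool is_multicast(const unsigned char *addr)
{
        return addr[0] & 0x01;
}

/* Accept an A-MSDU subframe only if its inner DA/SA match expectations. */
static bool subframe_addrs_ok(const unsigned char da[6],
                              const unsigned char sa[6],
                              const unsigned char *check_da,
                              const unsigned char *check_sa)
{
        if (check_da && !is_multicast(da) && memcmp(check_da, da, 6))
                return false;
        if (check_sa && memcmp(check_sa, sa, 6))
                return false;
        return true;
}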
@@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
u8 *pos;
u8 pn[6];
u64 pn64;
u8 aad[2 * AES_BLOCK_SIZE];
u8 aad[CCM_AAD_LEN];
u8 b_0[AES_BLOCK_SIZE];

if (info->control.hw_key &&
@@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,

pos += IEEE80211_CCMP_HDR_LEN;
ccmp_special_blocks(skb, pn, b_0, aad);
ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, mic_len), mic_len);

return 0;
return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, mic_len), mic_len);
}


@@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;
u8 pn[6];
u64 pn64;
u8 aad[2 * AES_BLOCK_SIZE];
u8 aad[GCM_AAD_LEN];
u8 j_0[AES_BLOCK_SIZE];

if (info->control.hw_key &&
@@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)

pos += IEEE80211_GCMP_HDR_LEN;
gcmp_special_blocks(skb, pn, j_0, aad);
ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
skb_put(skb, IEEE80211_GCMP_MIC_LEN));

return 0;
return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}

ieee80211_tx_result
@@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie_16 *mmie;
struct ieee80211_hdr *hdr;
u8 aad[20];
u8 aad[GMAC_AAD_LEN];
u64 pn64;
u8 nonce[12];
u8 nonce[GMAC_NONCE_LEN];

if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
return TX_DROP;
@@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
u8 aad[20], mic[16], ipn[6], nonce[12];
u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

if (!ieee80211_is_mgmt(hdr->frame_control))
@@ -246,6 +246,7 @@ enum {
ncsi_dev_state_config_gls,
ncsi_dev_state_config_done,
ncsi_dev_state_suspend_select = 0x0401,
ncsi_dev_state_suspend_gls,
ncsi_dev_state_suspend_dcnt,
ncsi_dev_state_suspend_dc,
ncsi_dev_state_suspend_deselect,
@@ -264,6 +265,7 @@ struct ncsi_dev_priv {
#endif
unsigned int package_num; /* Number of packages */
struct list_head packages; /* List of packages */
struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
unsigned int request_id; /* Last used request ID */
#define NCSI_REQ_START_IDX 1
@@ -141,23 +141,35 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
return -ENODEV;

/* If the channel is the active one, we need to reconfigure it */
spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
if (!list_empty(&nc->link) ||
nc->state != NCSI_CHANNEL_ACTIVE ||
(ncm->data[3] & 0x1))
nc->state != NCSI_CHANNEL_ACTIVE) {
spin_unlock_irqrestore(&nc->lock, flags);
return 0;
}

if (ndp->flags & NCSI_DEV_HWA)
spin_unlock_irqrestore(&nc->lock, flags);
if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
ndp->flags |= NCSI_DEV_RESHUFFLE;

/* If this channel is the active one and the link doesn't
* work, we have to choose another channel to be the active
* one. The logic here is exactly the same as what we do when
* the link goes down on the active channel.
*
* On the other hand, we need to configure it when the host
* driver state on the active channel becomes ready.
*/
ncsi_stop_channel_monitor(nc);

spin_lock_irqsave(&nc->lock, flags);
nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
NCSI_CHANNEL_ACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);

spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
@@ -540,42 +540,86 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_suspend_select;
/* Fall through */
case ncsi_dev_state_suspend_select:
ndp->pending_req_num = 1;

nca.type = NCSI_PKT_CMD_SP;
nca.package = np->id;
nca.channel = NCSI_RESERVED_CHANNEL;
if (ndp->flags & NCSI_DEV_HWA)
nca.bytes[0] = 0;
else
nca.bytes[0] = 1;

/* To retrieve the last link states of the channels in the
* current package when the current active channel needs to
* fail over to another one. It means we will possibly select
* another channel as the next active one. The link states of
* the channels are the most important factor in that
* selection, so we need accurate link states. Unfortunately,
* the link states on inactive channels can't be updated with
* LSC AEN in time.
*/
if (ndp->flags & NCSI_DEV_RESHUFFLE)
nd->state = ncsi_dev_state_suspend_gls;
else
nd->state = ncsi_dev_state_suspend_dcnt;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;

break;
case ncsi_dev_state_suspend_gls:
ndp->pending_req_num = np->channel_num;

nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;

nd->state = ncsi_dev_state_suspend_dcnt;
NCSI_FOR_EACH_CHANNEL(np, nc) {
nca.channel = nc->id;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
}

break;
case ncsi_dev_state_suspend_dcnt:
ndp->pending_req_num = 1;

nca.type = NCSI_PKT_CMD_DCNT;
nca.package = np->id;
nca.channel = nc->id;

nd->state = ncsi_dev_state_suspend_dc;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;

break;
case ncsi_dev_state_suspend_dc:
ndp->pending_req_num = 1;

nca.type = NCSI_PKT_CMD_DC;
nca.package = np->id;
nca.channel = nc->id;
nca.bytes[0] = 1;

nd->state = ncsi_dev_state_suspend_deselect;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;

break;
case ncsi_dev_state_suspend_deselect:
ndp->pending_req_num = 1;

np = ndp->active_package;
nc = ndp->active_channel;
nca.type = NCSI_PKT_CMD_DP;
nca.package = np->id;
if (nd->state == ncsi_dev_state_suspend_select) {
nca.type = NCSI_PKT_CMD_SP;
nca.channel = NCSI_RESERVED_CHANNEL;
if (ndp->flags & NCSI_DEV_HWA)
nca.bytes[0] = 0;
else
nca.bytes[0] = 1;
nd->state = ncsi_dev_state_suspend_dcnt;
} else if (nd->state == ncsi_dev_state_suspend_dcnt) {
nca.type = NCSI_PKT_CMD_DCNT;
nca.channel = nc->id;
nd->state = ncsi_dev_state_suspend_dc;
} else if (nd->state == ncsi_dev_state_suspend_dc) {
nca.type = NCSI_PKT_CMD_DC;
nca.channel = nc->id;
nca.bytes[0] = 1;
nd->state = ncsi_dev_state_suspend_deselect;
} else if (nd->state == ncsi_dev_state_suspend_deselect) {
nca.type = NCSI_PKT_CMD_DP;
nca.channel = NCSI_RESERVED_CHANNEL;
nd->state = ncsi_dev_state_suspend_done;
}
nca.channel = NCSI_RESERVED_CHANNEL;

nd->state = ncsi_dev_state_suspend_done;
ret = ncsi_xmit_cmd(&nca);
if (ret) {
nd->state = ncsi_dev_state_functional;
return;
}
if (ret)
goto error;

break;
case ncsi_dev_state_suspend_done:
@@ -589,6 +633,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
nd->state);
}

return;
error:
nd->state = ncsi_dev_state_functional;
}

static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
@@ -597,6 +645,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
struct net_device *dev = nd->dev;
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
struct ncsi_channel *hot_nc = NULL;
struct ncsi_cmd_arg nca;
unsigned char index;
unsigned long flags;
@@ -702,12 +751,20 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_config_done:
spin_lock_irqsave(&nc->lock, flags);
if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
hot_nc = nc;
nc->state = NCSI_CHANNEL_ACTIVE;
else
} else {
hot_nc = NULL;
nc->state = NCSI_CHANNEL_INACTIVE;
}
spin_unlock_irqrestore(&nc->lock, flags);

/* Update the hot channel */
spin_lock_irqsave(&ndp->lock, flags);
ndp->hot_channel = hot_nc;
spin_unlock_irqrestore(&ndp->lock, flags);

ncsi_start_channel_monitor(nc);
ncsi_process_next_channel(ndp);
break;
@@ -725,10 +782,14 @@ error:
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_package *np;
struct ncsi_channel *nc, *found;
struct ncsi_channel *nc, *found, *hot_nc;
struct ncsi_channel_mode *ncm;
unsigned long flags;

spin_lock_irqsave(&ndp->lock, flags);
hot_nc = ndp->hot_channel;
spin_unlock_irqrestore(&ndp->lock, flags);

/* The search is done once an inactive channel with up
* link is found.
*/
@@ -746,6 +807,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
if (!found)
found = nc;

if (nc == hot_nc)
found = nc;

ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
spin_unlock_irqrestore(&nc->lock, flags);
@@ -361,16 +361,9 @@ next_hook:
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
int err;

RCU_INIT_POINTER(state->hook_entries, entry);
err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
if (err < 0) {
if (err == -ESRCH &&
(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
goto next_hook;
kfree_skb(skb);
}
ret = nf_queue(skb, state, &entry, verdict);
if (ret == 1 && entry)
goto next_hook;
}
return ret;
}
@@ -983,7 +983,7 @@ static void gc_worker(struct work_struct *work)
return;

ratio = scanned ? expired_count * 100 / scanned : 0;
if (ratio >= 90)
if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
next_run = 0;

gc_work->last_bucket = i;
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,

/* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
unsigned int queuenum);
struct nf_hook_entry **entryp, unsigned int verdict);
void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
int __init netfilter_queue_init(void);
@@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
rcu_read_unlock();
}

/*
* Any packet that leaves via this function must come back
* through nf_reinject().
*/
int nf_queue(struct sk_buff *skb,
struct nf_hook_state *state,
unsigned int queuenum)
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
unsigned int queuenum)
{
int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
@@ -161,6 +156,27 @@ err:
return status;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
struct nf_hook_entry **entryp, unsigned int verdict)
{
struct nf_hook_entry *entry = *entryp;
int ret;

RCU_INIT_POINTER(state->hook_entries, entry);
ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
if (ret < 0) {
if (ret == -ESRCH &&
(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
*entryp = rcu_dereference(entry->next);
return 1;
}
kfree_skb(skb);
}

return 0;
}

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
struct nf_hook_entry *hook_entry;
@@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
entry->state.thresh = INT_MIN;

if (verdict == NF_ACCEPT) {
next_hook:
verdict = nf_iterate(skb, &entry->state, &hook_entry);
hook_entry = rcu_dereference(hook_entry->next);
if (hook_entry)
next_hook:
verdict = nf_iterate(skb, &entry->state, &hook_entry);
}

switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_STOP:
okfn:
local_bh_disable();
entry->state.okfn(entry->state.net, entry->state.sk, skb);
local_bh_enable();
break;
case NF_QUEUE:
RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
err = nf_queue(skb, &entry->state,
verdict >> NF_VERDICT_QBITS);
if (err < 0) {
if (err == -ESRCH &&
(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
err = nf_queue(skb, &entry->state, &hook_entry, verdict);
if (err == 1) {
if (hook_entry)
goto next_hook;
kfree_skb(skb);
goto okfn;
}
break;
case NF_STOLEN:
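A rough model of the reworked calling convention above (my sketch, not the kernel types): nf_queue() now frees the packet itself on hard errors and returns 1 for the queue-bypass case, after stepping *entryp forward so the caller's next_hook loop resumes at the following hook.

struct hook { struct hook *next; };

/* Stand-ins for the real queueing primitives. */
extern int enqueue_packet(void *skb, unsigned int queuenum);
extern void free_packet(void *skb);

static int queue_or_bypass(void *skb, struct hook **entryp,
                           unsigned int verdict, unsigned int qbits,
                           unsigned int bypass_flag)
{
        int err = enqueue_packet(skb, verdict >> qbits);

        if (err == -3 /* -ESRCH: no queue bound */ &&
            (verdict & bypass_flag)) {
                *entryp = (*entryp)->next;   /* caller resumes at next hook */
                return 1;
        }
        if (err < 0)
                free_packet(skb);
        return 0;
}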
@@ -4423,7 +4423,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
*/
unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
{
int val;
u32 val;

val = ntohl(nla_get_be32(attr));
if (val > max)
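Why the int-to-u32 change matters, for illustration: with a signed val, a netlink value of 0x80000000 or above turns negative and slips past the "val > max" bound. A userspace sketch of the corrected helper (assuming the kernel's -ERANGE convention):

#include <arpa/inet.h>
#include <errno.h>
#include <stdint.h>

static int parse_u32_check(uint32_t attr_be32, uint32_t max, uint32_t *dest)
{
        uint32_t val = ntohl(attr_be32);  /* attribute arrives in network order */

        if (val > max)
                return -ERANGE;
        *dest = val;
        return 0;
}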
@@ -158,7 +158,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
tb[NFTA_DYNSET_TIMEOUT])));
}

priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -246,7 +247,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
cpu_to_be64(jiffies_to_msecs(priv->timeout)),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
@@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 offset, len, err;
u32 offset, len;
int err;

if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_LEN] = { .type = NLA_U32 },
[NFTA_HASH_MODULUS] = { .type = NLA_U32 },
[NFTA_HASH_SEED] = { .type = NLA_U32 },
[NFTA_HASH_OFFSET] = { .type = NLA_U32 },
};

static int nft_hash_init(const struct nft_ctx *ctx,
@@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr,
const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
bool mismatch;
int d1, d2;

d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
switch (priv->op) {
case NFT_RANGE_EQ:
mismatch = (d1 < 0 || d2 > 0);
if (d1 < 0 || d2 > 0)
regs->verdict.code = NFT_BREAK;
break;
case NFT_RANGE_NEQ:
mismatch = (d1 >= 0 && d2 <= 0);
if (d1 >= 0 && d2 <= 0)
regs->verdict.code = NFT_BREAK;
break;
}

if (mismatch)
regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
@@ -59,6 +57,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
struct nft_range_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc_from, desc_to;
int err;
u32 op;

err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
&desc_from, tb[NFTA_RANGE_FROM_DATA]);
@@ -80,7 +79,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
if (err < 0)
goto err2;

priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
if (err < 0)
goto err2;

switch (op) {
case NFT_RANGE_EQ:
case NFT_RANGE_NEQ:
break;
default:
err = -EINVAL;
goto err2;
}

priv->op = op;
priv->len = desc_from.len;
return 0;
err2:
@@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
if (!num_hooks)
return ERR_PTR(-EINVAL);

ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
li.u.ulog.flags = 0;

if (info->flags & XT_NFLOG_F_COPY_LEN)
li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
@@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
@@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision)
return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
XT_HASHLIMIT_SCALE);
} else {
if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
* HZ * CREDITS_PER_JIFFY;
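A note on the ULL suffixes above (my reading): C's rules already give the unsuffixed 64-bit constant an unsigned 64-bit type, so the value is unchanged; the suffix makes that type explicit and silences "integer constant is so large" warnings, notably on 32-bit builds. A quick standalone check:

#include <stdio.h>

#define HZ 250          /* assumed value for the example; kernels vary */

int main(void)
{
        unsigned long long max_cpj =
                0xFFFFFFFFFFFFFFFFULL / (HZ * 60 * 60 * 24);

        printf("MAX_CPJ = %llu\n", max_cpj);
        return 0;
}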
@@ -26,6 +26,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
MODULE_ALIAS("ipt_ipcomp");
MODULE_ALIAS("ip6t_ipcomp");

/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
static int packet_direct_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
netdev_features_t features;
struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;

@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
!netif_carrier_ok(dev)))
goto drop;

features = netif_skb_features(skb);
if (skb_needs_linearize(skb, features) &&
__skb_linearize(skb))
skb = validate_xmit_skb_list(skb, dev);
if (skb != orig_skb)
goto drop;

txq = skb_get_tx_queue(dev, skb);
@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
return ret;
drop:
atomic_long_inc(&dev->tx_dropped);
kfree_skb(skb);
kfree_skb_list(skb);
return NET_XMIT_DROP;
}
@@ -13,5 +13,5 @@ obj-$(CONFIG_RDS_TCP) += rds_tcp.o
rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
tcp_send.o tcp_stats.o

ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG
ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG


@@ -33,7 +33,7 @@
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef DEBUG
#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
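The rename here looks defensive (my reading): a bare -DDEBUG turns on DEBUG-guarded paths in every header the flag reaches, while a private RDS_DEBUG only feeds the rdsdebug() wrapper. A userspace sketch of the same guard:

#include <stdio.h>

#ifdef RDS_DEBUG
#define rdsdebug(fmt, ...) \
        fprintf(stderr, "%s(): " fmt, __func__, ##__VA_ARGS__)
#else
#define rdsdebug(fmt, ...) do { } while (0)   /* compiled out */
#endif

int main(void)
{
        rdsdebug("debug build\n");   /* prints only when built with -DRDS_DEBUG */
        return 0;
}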
@@ -276,7 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
goto error;

trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
here, ERR_PTR(ret));
here, NULL);

spin_lock_bh(&call->conn->params.peer->lock);
hlist_add_head(&call->error_link,
@@ -193,8 +193,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
dst = ip6_route_output(&init_net, NULL, fl6);
if (IS_ERR(dst)) {
_leave(" [route err %ld]", PTR_ERR(dst));
if (dst->error) {
_leave(" [route err %d]", dst->error);
return;
}
break;
@@ -1028,8 +1028,7 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n)

if (tb[1] == NULL)
return NULL;
if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
nla_len(tb[1]), NULL) < 0)
if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0)
return NULL;
kind = tb2[TCA_ACT_KIND];
@@ -249,8 +249,11 @@ out:
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
u64 lastuse)
{
tcf_lastuse_update(&a->tcfa_tm);
struct tcf_mirred *m = to_mirred(a);
struct tcf_t *tm = &m->tcf_tm;

_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
tm->lastuse = lastuse;
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@ -345,7 +345,8 @@ replay:
if (err == 0) {
struct tcf_proto *next = rtnl_dereference(tp->next);

tfilter_notify(net, skb, n, tp, fh,
tfilter_notify(net, skb, n, tp,
t->tcm_handle,
RTM_DELTFILTER, false);
if (tcf_destroy(tp, false))
RCU_INIT_POINTER(*back, next);
@@ -418,6 +418,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
__u8 has_data = 0;
int gso = 0;
int pktcount = 0;
int auth_len = 0;
struct dst_entry *dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */

@@ -510,7 +511,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
list_for_each_entry(chunk, &packet->chunk_list, list) {
int padded = SCTP_PAD4(chunk->skb->len);

if (pkt_size + padded > tp->pathmtu)
if (chunk == packet->auth)
auth_len = padded;
else if (auth_len + padded + packet->overhead >
tp->pathmtu)
goto nomem;
else if (pkt_size + padded > tp->pathmtu)
break;
pkt_size += padded;
}
@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);

/* Report violation if chunk len overflows */
ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);

/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
}
}

/* Report violation if chunk len overflows */
ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);

ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
@@ -4687,7 +4687,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
if (len <= 0)
if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
@@ -6430,6 +6430,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;

if (len < 0)
return -EINVAL;

lock_sock(sk);

switch (optname) {
@@ -768,6 +768,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
int err;

if (!netif_is_bridge_port(dev))
return -EOPNOTSUPP;

err = switchdev_port_attr_get(dev, &attr);
if (err && err != -EOPNOTSUPP)
return err;
@@ -923,6 +926,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlattr *afspec;
int err = 0;

if (!netif_is_bridge_port(dev))
return -EOPNOTSUPP;

protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_PROTINFO);
if (protinfo) {
@@ -956,6 +962,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
{
struct nlattr *afspec;

if (!netif_is_bridge_port(dev))
return -EOPNOTSUPP;

afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_AF_SPEC);
if (afspec)
@@ -247,11 +247,17 @@ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
*
* RCU is locked, no other locks set
*/
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr)
{
struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
u16 acked = msg_bcast_ack(hdr);
struct sk_buff_head xmitq;

/* Ignore bc acks sent by peer before bcast synch point was received */
if (msg_bc_ack_invalid(hdr))
return;

__skb_queue_head_init(&xmitq);

tipc_bcast_lock(net);
@@ -279,11 +285,11 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
__skb_queue_head_init(&xmitq);

tipc_bcast_lock(net);
if (msg_type(hdr) == STATE_MSG) {
if (msg_type(hdr) != STATE_MSG) {
tipc_link_bc_init_rcv(l, hdr);
} else if (!msg_bc_ack_invalid(hdr)) {
tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
} else {
tipc_link_bc_init_rcv(l, hdr);
}
tipc_bcast_unlock(net);
@@ -55,7 +55,8 @@ void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
int tipc_bcast_get_mtu(struct net *net);
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr);
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
@@ -1312,6 +1312,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_next_sent(hdr, l->snd_nxt);
msg_set_ack(hdr, l->rcv_nxt - 1);
msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
msg_set_bc_ack_invalid(hdr, !node_up);
msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_link_tolerance(hdr, tolerance);
msg_set_linkprio(hdr, priority);
@@ -1574,6 +1575,7 @@ static void tipc_link_build_bc_init_msg(struct tipc_link *l,
__skb_queue_head_init(&list);
if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
return;
msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
tipc_link_xmit(l, &list, xmitq);
}
@@ -714,6 +714,23 @@ static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
msg_set_bits(m, 5, 13, 0x1, s);
}

static inline bool msg_bc_ack_invalid(struct tipc_msg *m)
{
switch (msg_user(m)) {
case BCAST_PROTOCOL:
case NAME_DISTRIBUTOR:
case LINK_PROTOCOL:
return msg_bits(m, 5, 14, 0x1);
default:
return false;
}
}

static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid)
{
msg_set_bits(m, 5, 14, 0x1, invalid);
}

static inline char *msg_media_addr(struct tipc_msg *m)
{
return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
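For reference, a plain-C model of the msg_bits()/msg_set_bits() accessors the new flag rides on — word 5, bit position 14, 1-bit mask (illustrative only; the real TIPC helpers also convert to and from network byte order):

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t get_bits(const uint32_t *hdr, int w, int pos,
                                uint32_t mask)
{
        return (hdr[w] >> pos) & mask;
}

static inline void set_bits(uint32_t *hdr, int w, int pos, uint32_t mask,
                            uint32_t val)
{
        hdr[w] &= ~(mask << pos);
        hdr[w] |= (val & mask) << pos;
}

/* bc_ack_invalid lives at word 5, bit 14, mask 0x1 */
static inline bool bc_ack_invalid(const uint32_t *hdr)
{
        return get_bits(hdr, 5, 14, 0x1);
}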
@@ -156,6 +156,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
pr_warn("Bulk publication failure\n");
return;
}
msg_set_bc_ack_invalid(buf_msg(skb), true);
item = (struct distr_item *)msg_data(buf_msg(skb));
}
@@ -1535,7 +1535,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
if (unlikely(usr == LINK_PROTOCOL))
tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);

/* Receive packet directly if conditions permit */
tipc_node_read_lock(n);
@@ -104,13 +104,16 @@ static int wiphy_suspend(struct device *dev)

rtnl_lock();
if (rdev->wiphy.registered) {
if (!rdev->wiphy.wowlan_config)
if (!rdev->wiphy.wowlan_config) {
cfg80211_leave_all(rdev);
cfg80211_process_rdev_events(rdev);
}
if (rdev->ops->suspend)
ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
if (ret == 1) {
/* Driver refused to configure wowlan */
cfg80211_leave_all(rdev);
cfg80211_process_rdev_events(rdev);
ret = rdev_suspend(rdev, NULL);
}
}
@@ -421,8 +421,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
}
EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);

static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype)
int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
const u8 *addr, enum nl80211_iftype iftype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -526,13 +526,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,

return 0;
}

int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
}
EXPORT_SYMBOL(ieee80211_data_to_8023);
EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);

int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype,
@@ -747,24 +741,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
bool has_80211_header)
const u8 *check_da, const u8 *check_sa)
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
u16 ethertype;
u8 *payload;
int offset = 0, remaining, err;
int offset = 0, remaining;
struct ethhdr eth;
bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
bool reuse_skb = false;
bool last = false;

if (has_80211_header) {
err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
if (err)
goto out;
}

while (!last) {
unsigned int subframe_len;
int len;
@@ -781,8 +769,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
goto purge;

offset += sizeof(struct ethhdr);
/* reuse skb for the last subframe */
last = remaining <= subframe_len + padding;

/* FIXME: should we really accept multicast DA? */
if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
!ether_addr_equal(check_da, eth.h_dest)) ||
(check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
offset += len + padding;
continue;
}

/* reuse skb for the last subframe */
if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
skb_pull(skb, offset);
frame = skb;
@@ -820,7 +817,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,

purge:
__skb_queue_purge(list);
out:
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);