Merge commit '9f12600fe425bc28f0ccba034a77783c09c15af4' into for-linus
Backmerge of dcache.c changes from mainline. It's that, or complete
rebase...

Conflicts:
	fs/splice.c

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
 	if (err < 0)
 		goto out_uninit_mvrp;
 
+	vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out_uninit_mvrp;
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 	}
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-	int subclass = 0;
-
-	while (is_vlan_dev(real_dev)) {
-		subclass++;
-		real_dev = vlan_dev_priv(real_dev)->real_dev;
-	}
-
-	return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-	int err = 0, subclass;
-
-	subclass = vlan_calculate_locking_subclass(to);
-
-	spin_lock_nested(&to->addr_list_lock, subclass);
-	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-	if (!err)
-		__dev_set_rx_mode(to);
-	spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-	int err = 0, subclass;
-
-	subclass = vlan_calculate_locking_subclass(to);
-
-	spin_lock_nested(&to->addr_list_lock, subclass);
-	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-	if (!err)
-		__dev_set_rx_mode(to);
-	spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-	vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-	vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
 	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+	return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
 	.create	 = vlan_dev_hard_header,
 	.rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-	int subclass = 0;
 
 	netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-	subclass = vlan_calculate_locking_subclass(dev);
-	vlan_dev_set_lockdep_class(dev, subclass);
+	vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
 	vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
 	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
 #endif
 	.ndo_fix_features	= vlan_dev_fix_features,
+	.ndo_get_lock_subclass	= vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
@@ -1545,6 +1545,8 @@ out_neigh:
 	if ((orig_neigh_node) && (!is_single_hop_neigh))
 		batadv_orig_node_free_ref(orig_neigh_node);
 out:
+	if (router_ifinfo)
+		batadv_neigh_ifinfo_free_ref(router_ifinfo);
 	if (router)
 		batadv_neigh_node_free_ref(router);
 	if (router_router)
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 		 * additional DAT answer may trigger kernel warnings about
 		 * a packet coming from the wrong port.
 		 */
-		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
-					BATADV_NO_FLAGS)) {
+		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
 			ret = true;
 			goto out;
 		}
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
 			     struct batadv_neigh_node *neigh_node)
 {
 	struct batadv_priv *bat_priv;
-	struct batadv_hard_iface *primary_if;
+	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_frag_packet frag_header;
 	struct sk_buff *skb_fragment;
 	unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
 	unsigned header_size = sizeof(frag_header);
 	unsigned max_fragment_size, max_packet_size;
+	bool ret = false;
 
 	/* To avoid merge and refragmentation at next-hops we never send
 	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
 			   skb->len + ETH_HLEN);
 	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 
-	return true;
+	ret = true;
+
 out_err:
-	return false;
+	if (primary_if)
+		batadv_hardif_free_ref(primary_if);
+
+	return ret;
 }
@@ -42,8 +42,10 @@
 
 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
 {
-	if (atomic_dec_and_test(&gw_node->refcount))
+	if (atomic_dec_and_test(&gw_node->refcount)) {
+		batadv_orig_node_free_ref(gw_node->orig_node);
 		kfree_rcu(gw_node, rcu);
+	}
 }
 
 static struct batadv_gw_node *
@@ -406,10 +408,15 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 	if (gateway->bandwidth_down == 0)
 		return;
 
-	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-	if (!gw_node)
+	if (!atomic_inc_not_zero(&orig_node->refcount))
 		return;
 
+	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
+	if (!gw_node) {
+		batadv_orig_node_free_ref(orig_node);
+		return;
+	}
+
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
 	atomic_set(&gw_node->refcount, 1);
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
 		return true;
 
 	/* no more parents..stop recursion */
-	if (net_dev->iflink == net_dev->ifindex)
+	if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
 		return false;
 
 	/* recurse over the parent device */
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
 {
 	struct batadv_orig_ifinfo *orig_ifinfo;
+	struct batadv_neigh_node *router;
 
 	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
 
 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
 		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
 
+	/* this is the last reference to this object */
+	router = rcu_dereference_protected(orig_ifinfo->router, true);
+	if (router)
+		batadv_neigh_node_free_ref_now(router);
 	kfree(orig_ifinfo);
 }
 
@@ -701,6 +706,47 @@ free_orig_node:
 	return NULL;
 }
 
+/**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: orig node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+			  struct batadv_neigh_node *neigh)
+{
+	struct batadv_neigh_ifinfo *neigh_ifinfo;
+	struct batadv_hard_iface *if_outgoing;
+	struct hlist_node *node_tmp;
+
+	spin_lock_bh(&neigh->ifinfo_lock);
+
+	/* for all ifinfo objects for this neighinator */
+	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+				  &neigh->ifinfo_list, list) {
+		if_outgoing = neigh_ifinfo->if_outgoing;
+
+		/* always keep the default interface */
+		if (if_outgoing == BATADV_IF_DEFAULT)
+			continue;
+
+		/* don't purge if the interface is not (going) down */
+		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+			continue;
+
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+			   neigh->addr, if_outgoing->net_dev->name);
+
+		hlist_del_rcu(&neigh_ifinfo->list);
+		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+	}
+
+	spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
 /**
  * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 
 			hlist_del_rcu(&neigh_node->list);
 			batadv_neigh_node_free_ref(neigh_node);
+		} else {
+			/* only necessary if not the whole neighbor is to be
+			 * deleted, but some interface has been removed.
+			 */
+			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
 		}
 	}
 
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 {
 	struct batadv_neigh_node *best_neigh_node;
 	struct batadv_hard_iface *hard_iface;
-	bool changed;
+	bool changed_ifinfo, changed_neigh;
 
 	if (batadv_has_timed_out(orig_node->last_seen,
 				 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
 			   jiffies_to_msecs(orig_node->last_seen));
 		return true;
 	}
-	changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
-	changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
 
-	if (!changed)
+	if (!changed_ifinfo && !changed_neigh)
 		return false;
 
 	/* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
 	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
 
 out:
-	batadv_hardif_free_ref(hard_iface);
+	if (hard_iface)
+		batadv_hardif_free_ref(hard_iface);
 	return 0;
 }
 
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
 	return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
 	int ret;
 
-	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+	if (skb->protocol == htons(ETH_P_IP) &&
 	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
 	    !skb_is_gso(skb)) {
 		if (br_parse_ip_options(skb))
@@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
 	return r;
 }
 
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
 		     int offset, size_t size, bool more)
 {
 	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
@@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
 	return ret;
 }
 
+static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+		     int offset, size_t size, bool more)
+{
+	int ret;
+	struct kvec iov;
+
+	/* sendpage cannot properly handle pages with page_count == 0,
+	 * we need to fallback to sendmsg if that's the case */
+	if (page_count(page) >= 1)
+		return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+	iov.iov_base = kmap(page) + offset;
+	iov.iov_len = size;
+	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
+	kunmap(page);
+
+	return ret;
+}
+
 /*
  * Shutdown/close the socket for the given connection.
@@ -329,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
 	dout("crush decode tunable chooseleaf_descend_once = %d",
 	     c->chooseleaf_descend_once);
 
+	ceph_decode_need(p, end, sizeof(u8), done);
+	c->chooseleaf_vary_r = ceph_decode_8(p);
+	dout("crush decode tunable chooseleaf_vary_r = %d",
+	     c->chooseleaf_vary_r);
+
 done:
 	dout("crush_decode success\n");
 	return c;
diff --git a/net/core/dev.c b/net/core/dev.c
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
 	int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-					    const struct net_device *dev,
-					    netdev_features_t features)
+	netdev_features_t features)
 {
 	int tmp;
 
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(dev, skb)) {
+	} else if (illegal_highdma(skb->dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
 
 	return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-					 const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	netdev_features_t features = dev->features;
+	netdev_features_t features = skb->dev->features;
 
-	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, dev, features);
+		return harmonize_features(skb, features);
 	}
 
-	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 					NETIF_F_HW_VLAN_STAG_TX);
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
 			NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 			NETIF_F_HW_VLAN_STAG_TX;
 
-	return harmonize_features(skb, dev, features);
+	return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	}
 	NAPI_GRO_CB(skb)->count = 1;
 	NAPI_GRO_CB(skb)->age = jiffies;
+	NAPI_GRO_CB(skb)->last = skb;
 	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	skb->next = napi->gro_list;
 	napi->gro_list = skb;
@@ -4542,6 +4541,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
 }
 EXPORT_SYMBOL(netdev_adjacent_get_private);
 
+/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+						 struct list_head **iter)
+{
+	struct netdev_adjacent *upper;
+
+	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+	if (&upper->list == &dev->adj_list.upper)
+		return NULL;
+
+	*iter = &upper->list;
+
+	return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
 /**
  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
@@ -4623,6 +4648,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
+/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchainged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->adj_list.lower)
+		return NULL;
+
+	*iter = &lower->list;
+
+	return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *				       lower neighbour list, RCU
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+		       bool (*type_check)(struct net_device *dev))
+{
+	struct net_device *lower = NULL;
+	struct list_head *iter;
+	int max_nest = -1;
+	int nest;
+
+	ASSERT_RTNL();
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		nest = dev_get_nest_level(lower, type_check);
+		if (max_nest < nest)
+			max_nest = nest;
+	}
+
+	if (type_check(dev))
+		max_nest++;
+
+	return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
 	if (ops->ndo_set_rx_mode)
 		ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
 
 /* Delayed registration/unregisteration */
 static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 
 static void net_set_todo(struct net_device *dev)
 {
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
 	neigh->updated = jiffies;
 	if (!(neigh->nud_state & NUD_FAILED))
 		return;
-	neigh->nud_state = NUD_PROBE;
-	atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+	neigh->nud_state = NUD_INCOMPLETE;
+	atomic_set(&neigh->probes, neigh_max_probes(neigh));
 	neigh_add_timer(neigh,
 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
 }
@@ -24,7 +24,7 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+	struct net *net;
+	bool unregistering;
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait(&netdev_unregistering_wq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		unregistering = false;
+		rtnl_lock();
+		for_each_net(net) {
+			if (net->dev_unreg_count > 0) {
+				unregistering = true;
+				break;
+			}
+		}
+		if (!unregistering)
+			break;
+		__rtnl_unlock();
+		schedule();
+	}
+	finish_wait(&netdev_unregistering_wq, &wait);
+}
+
 /**
  * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
  * @ops: struct rtnl_link_ops * to unregister
  */
 void rtnl_link_unregister(struct rtnl_link_ops *ops)
 {
-	rtnl_lock();
+	/* Close the race with cleanup_net() */
+	mutex_lock(&net_mutex);
+	rtnl_lock_unregistering_all();
 	__rtnl_link_unregister(ops);
 	rtnl_unlock();
+	mutex_unlock(&net_mutex);
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	if (unlikely(p->len + len >= 65536))
 		return -E2BIG;
 
-	lp = NAPI_GRO_CB(p)->last ?: p;
+	lp = NAPI_GRO_CB(p)->last;
 	pinfo = skb_shinfo(lp);
 
 	if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
 
 	__skb_pull(skb, offset);
 
-	if (!NAPI_GRO_CB(p)->last)
+	if (NAPI_GRO_CB(p)->last == p)
 		skb_shinfo(p)->frag_list = skb;
 	else
 		NAPI_GRO_CB(p)->last->next = skb;
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
 {
 	struct __net_random_once_work *work =
 		container_of(w, struct __net_random_once_work, work);
-	if (!static_key_enabled(work->key))
-		static_key_slow_inc(work->key);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
 	kfree(work);
 }
 
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
 }
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *done_key)
+			   struct static_key *once_key)
 {
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
 	*done = true;
 	spin_unlock_irqrestore(&lock, flags);
 
-	__net_random_once_disable_jump(done_key);
+	__net_random_once_disable_jump(once_key);
 
 	return true;
 }
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
 		goto out_free;
 	}
 
-	chip_index = 0;
+	chip_index = -1;
 	for_each_available_child_of_node(np, child) {
+		chip_index++;
 		cd = &pd->chip[chip_index];
 
 		cd->mii_bus = &mdio_bus->dev;
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
 	return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+	/*
+	 * Set defaults for local port range
+	 */
+	seqlock_init(&net->ipv4.ip_local_ports.lock);
+	net->ipv4.ip_local_ports.range[0] = 32768;
+	net->ipv4.ip_local_ports.range[1] = 61000;
+
+	seqlock_init(&net->ipv4.ping_group_range.lock);
+	/*
+	 * Sane defaults - nobody may create ping sockets.
+	 * Boot scripts should set this to distro-specific group.
+	 */
+	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+	return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+	.init = inet_init_net,
+	.exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+	return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
 	if (ip_mr_init())
 		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
 
+	if (init_inet_pernet_ops())
+		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
+
 	/*
 	 *	Initialise per-cpu ipv4 mibs
 	 */
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
 	if (fi == NULL)
 		goto failure;
-	fib_info_cnt++;
 	if (cfg->fc_mx) {
 		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
 		if (!fi->fib_metrics)
 			goto failure;
 	} else
 		fi->fib_metrics = (u32 *) dst_default_metrics;
+	fib_info_cnt++;
 
 	fi->fib_net = hold_net(net);
 	fi->fib_protocol = cfg->fc_protocol;
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
 	unsigned int seq;
 
 	do {
-		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-		*low = net->ipv4.sysctl_local_ports.range[0];
-		*high = net->ipv4.sysctl_local_ports.range[1];
-	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+		*low = net->ipv4.ip_local_ports.range[0];
+		*high = net->ipv4.ip_local_ports.range[1];
+	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
@@ -42,12 +42,12 @@
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
 	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-	       !skb->local_df;
+		skb->local_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-	if (skb->len <= mtu || skb->local_df)
+	if (skb->len <= mtu)
 		return false;
 
 	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-	unsigned int mtu;
-
-	if (skb->local_df || !skb_is_gso(skb))
-		return false;
-
-	mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-	/* if seglen > mtu, do software segmentation for IP fragmentation on
-	 * output. DF bit cannot be set since ip_forward would have sent
-	 * icmp error.
-	 */
-	return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-	struct dst_entry *dst = skb_dst(skb);
-	netdev_features_t features;
-	struct sk_buff *segs;
-	int ret = 0;
-
-	features = netif_skb_dev_features(skb, dst->dev);
-	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-	if (IS_ERR(segs)) {
-		kfree_skb(skb);
-		return -ENOMEM;
-	}
-
-	consume_skb(skb);
-
-	do {
-		struct sk_buff *nskb = segs->next;
-		int err;
-
-		segs->next = NULL;
-		err = dst_output(segs);
-
-		if (err && ret == 0)
-			ret = err;
-		segs = nskb;
-	} while (segs);
-
-	return ret;
-}
-
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
-	if (ip_gso_exceeds_dst_mtu(skb))
-		return ip_forward_finish_gso(skb);
-
 	return dst_output(skb);
 }
 
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
 		 * "Fragment Reassembly Timeout" message, per RFC792.
 		 */
 		if (qp->user == IP_DEFRAG_AF_PACKET ||
-		    (qp->user == IP_DEFRAG_CONNTRACK_IN &&
-		     skb_rtable(head)->rt_type != RTN_LOCAL))
+		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
+		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
+		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
 			goto out_rcu_unlock;
 
 
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+	netdev_features_t features;
+	struct sk_buff *segs;
+	int ret = 0;
+
+	/* common case: locally created skb or seglen is <= mtu */
+	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+	      skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+		return ip_finish_output2(skb);
+
+	/* Slowpath - GSO segment length is exceeding the dst MTU.
+	 *
+	 * This can happen in two cases:
+	 * 1) TCP GRO packet, DF bit not set
+	 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+	 * from host network stack.
+	 */
+	features = netif_skb_features(skb);
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+	if (IS_ERR(segs)) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	consume_skb(skb);
+
+	do {
+		struct sk_buff *nskb = segs->next;
+		int err;
+
+		segs->next = NULL;
+		err = ip_fragment(segs, ip_finish_output2);
+
+		if (err && ret == 0)
+			ret = err;
+		segs = nskb;
+	} while (segs);
+
+	return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
 		return dst_output(skb);
 	}
 #endif
-	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+	if (skb_is_gso(skb))
+		return ip_finish_output_gso(skb);
+
+	if (skb->len > ip_skb_dst_mtu(skb))
 		return ip_fragment(skb, ip_finish_output2);
-	else
-		return ip_finish_output2(skb);
+
+	return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst;
 	int err;
-	bool connected = true;
+	bool connected;
 
 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+	connected = (tunnel->parms.iph.daddr != 0);
 
 	dst = tnl_params->daddr;
 	if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
 	 */
 	if (!IS_ERR(itn->fb_tunnel_dev)) {
 		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+		itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
 		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
 	}
 	rtnl_unlock();
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vti4_err(struct sk_buff *skb, u32 info)
 {
 	__be32 spi;
+	__u32 mark;
 	struct xfrm_state *x;
 	struct ip_tunnel *tunnel;
 	struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
 	if (!tunnel)
 		return -1;
 
+	mark = be32_to_cpu(tunnel->parms.o_key);
+
 	switch (protocol) {
 	case IPPROTO_ESP:
 		esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
 		return 0;
 	}
 
-	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
 			      spi, protocol, AF_INET);
 	if (!x)
 		return 0;
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
 	int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 	err = ip_defrag(skb, user);
 	local_bh_enable();
 
-	if (!err)
+	if (!err) {
 		ip_send_check(ip_hdr(skb));
+		skb->local_df = 1;
+	}
 
 	return err;
 }
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
 					  kgid_t *high)
 {
-	kgid_t *data = net->ipv4.sysctl_ping_group_range;
+	kgid_t *data = net->ipv4.ping_group_range.range;
 	unsigned int seq;
 
 	do {
-		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
 	struct in_device *out_dev;
 	unsigned int flags = 0;
 	bool do_cache;
-	u32 itag;
+	u32 itag = 0;
 
 	/* get a working reference to the output device */
 	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-	write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-	net->ipv4.sysctl_local_ports.range[0] = range[0];
-	net->ipv4.sysctl_local_ports.range[1] = range[1];
-	write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+	write_seqlock(&net->ipv4.ip_local_ports.lock);
+	net->ipv4.ip_local_ports.range[0] = range[0];
+	net->ipv4.ip_local_ports.range[1] = range[1];
+	write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 				 size_t *lenp, loff_t *ppos)
 {
 	struct net *net =
-		container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+		container_of(table->data, struct net, ipv4.ip_local_ports.range);
 	int ret;
 	int range[2];
 	struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
 	kgid_t *data = table->data;
 	struct net *net =
-		container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+		container_of(table->data, struct net, ipv4.ping_group_range.range);
 	unsigned int seq;
 	do {
-		seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
 	kgid_t *data = table->data;
 	struct net *net =
-		container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-	write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+		container_of(table->data, struct net, ipv4.ping_group_range.range);
+	write_seqlock(&net->ipv4.ip_local_ports.lock);
 	data[0] = low;
 	data[1] = high;
-	write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+	write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
 	},
 	{
 		.procname	= "ping_group_range",
-		.data		= &init_net.ipv4.sysctl_ping_group_range,
+		.data		= &init_net.ipv4.ping_group_range.range,
 		.maxlen		= sizeof(gid_t)*2,
 		.mode		= 0644,
 		.proc_handler	= ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
 	},
 	{
 		.procname	= "ip_local_port_range",
-		.maxlen		= sizeof(init_net.ipv4.sysctl_local_ports.range),
-		.data		= &init_net.ipv4.sysctl_local_ports.range,
+		.maxlen		= sizeof(init_net.ipv4.ip_local_ports.range),
+		.data		= &init_net.ipv4.ip_local_ports.range,
 		.mode		= 0644,
 		.proc_handler	= ipv4_local_port_range,
 	},
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 			table[i].data += (void *)net - (void *)&init_net;
 	}
 
-	/*
-	 * Sane defaults - nobody may create ping sockets.
-	 * Boot scripts should set this to distro-specific group.
-	 */
-	net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-	net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-	/*
-	 * Set defaults for local port range
-	 */
-	seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-	net->ipv4.sysctl_local_ports.range[0] = 32768;
-	net->ipv4.sysctl_local_ports.range[1] = 61000;
-
 	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (net->ipv4.ipv4_hdr == NULL)
 		goto err_reg;
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
-	skb->protocol = htons(ETH_P_IP);
+	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
 
 	return x->outer_mode->output2(x, skb);
 }
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
 
 int xfrm4_output_finish(struct sk_buff *skb)
 {
-#ifdef CONFIG_NETFILTER
-	if (!skb_dst(skb)->xfrm) {
-		IPCB(skb)->flags |= IPSKB_REROUTED;
-		return dst_output(skb);
-	}
+	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+	skb->protocol = htons(ETH_P_IP);
 
+#ifdef CONFIG_NETFILTER
 	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 #endif
 
-	skb->protocol = htons(ETH_P_IP);
 	return xfrm_output(skb);
 }
 
+static int __xfrm4_output(struct sk_buff *skb)
+{
+	struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+#ifdef CONFIG_NETFILTER
+	if (!x) {
+		IPCB(skb)->flags |= IPSKB_REROUTED;
+		return dst_output(skb);
+	}
+#endif
+
+	return x->outer_mode->afinfo->output_finish(skb);
+}
+
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
-
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-			    NULL, dst->dev,
-			    x->outer_mode->afinfo->output_finish,
+			    NULL, skb_dst(skb)->dev, __xfrm4_output,
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
 	int ret;
 	struct xfrm4_protocol *handler;
+	struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
 
-	for_each_protocol_rcu(*proto_handlers(protocol), handler)
+	if (!head)
+		return 0;
+
+	for_each_protocol_rcu(*head, handler)
 		if ((ret = handler->cb_handler(skb, err)) <= 0)
 			return ret;
 
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
 {
 	int ret;
 	struct xfrm4_protocol *handler;
+	struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
 
 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 
-	for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+	if (!head)
+		goto out;
+
+	for_each_protocol_rcu(*head, handler)
 		if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
 			return ret;
 
+out:
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 	kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
 	int ret = -EEXIST;
 	int priority = handler->priority;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm4_protocol_mutex);
 
 	if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
 	struct xfrm4_protocol *t;
 	int ret = -ENOENT;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm4_protocol_mutex);
 
 	for (pprev = proto_handlers(protocol);
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 	unsigned int off;
 	u16 flush = 1;
 	int proto;
-	__wsum csum;
 
 	off = skb_gro_offset(skb);
 	hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	NAPI_GRO_CB(skb)->flush |= flush;
 
-	csum = skb->csum;
-	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+	skb_gro_postpull_rcsum(skb, iph, nlen);
 
 	pp = ops->callbacks.gro_receive(head, skb);
 
-	skb->csum = csum;
-
 out_unlock:
 	rcu_read_unlock();
 
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-	if (skb->len <= mtu || skb->local_df)
+	if (skb->len <= mtu)
 		return false;
 
 	/* ipv6 conntrack defrag sets max_frag_size + local_df */
 	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
 		return true;
 
+	if (skb->local_df)
+		return false;
+
 	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
 		return false;
 
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	unsigned int maxnonfragsize, headersize;
 
 	headersize = sizeof(struct ipv6hdr) +
-		     (opt ? opt->tot_len : 0) +
+		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
 		     (dst_allfrag(&rt->dst) ?
 		      sizeof(struct frag_hdr) : 0) +
 		     rt->rt6i_nfheader_len;
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
 {
 	u8 proto;
 
-	if (!data)
+	if (!data || !data[IFLA_IPTUN_PROTO])
 		return 0;
 
 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		    u8 type, u8 code, int offset, __be32 info)
 {
 	__be32 spi;
+	__u32 mark;
 	struct xfrm_state *x;
 	struct ip6_tnl *t;
 	struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (!t)
 		return -1;
 
+	mark = be32_to_cpu(t->parms.o_key);
+
 	switch (protocol) {
 	case IPPROTO_ESP:
 		esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	    type != NDISC_REDIRECT)
 		return 0;
 
-	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
 			      spi, protocol, AF_INET6);
 	if (!x)
 		return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
 
 	err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
 	if (err < 0) {
-		unregister_pernet_device(&vti6_net_ops);
 		pr_err("%s: can't register vti6 protocol\n", __func__);
 
 		goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
 	err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
 	if (err < 0) {
 		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-		unregister_pernet_device(&vti6_net_ops);
 		pr_err("%s: can't register vti6 protocol\n", __func__);
 
 		goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
 	if (err < 0) {
 		xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
 		xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-		unregister_pernet_device(&vti6_net_ops);
 		pr_err("%s: can't register vti6 protocol\n", __func__);
 
 		goto out;
@@ -851,7 +851,7 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
 	struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-	const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+	struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
 	const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
 	u8 *lladdr = NULL;
 	u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
 		/*
 		 * Change: router to host
 		 */
-		struct rt6_info *rt;
-		rt = rt6_get_dflt_router(saddr, dev);
-		if (rt)
-			ip6_del_rt(rt);
+		rt6_clean_tohost(dev_net(dev), saddr);
 	}
 
 out:
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
 	};
+	int err;
 
 	dst = ip6_route_output(net, skb->sk, &fl6);
-	if (dst->error) {
+	err = dst->error;
+	if (err) {
 		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 		LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
 		dst_release(dst);
-		return dst->error;
+		return err;
 	}
 
 	/* Drop old route. */
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
 }
 
+#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when gateway turn into host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+	struct in6_addr *gateway = (struct in6_addr *)arg;
+
+	if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+	     ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+	     ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+		return -1;
+	}
+	return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+	fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
 struct arg_dev_net {
 	struct net_device *dev;
 	struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
 	if (tb[RTA_OIF])
 		oif = nla_get_u32(tb[RTA_OIF]);
 
+	if (tb[RTA_MARK])
+		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
 	if (iif) {
 		struct net_device *dev;
 		int flags = 0;
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 	if (NAPI_GRO_CB(skb)->flush)
 		goto skip_csum;
 
-	wsum = skb->csum;
+	wsum = NAPI_GRO_CB(skb)->csum;
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_NONE:
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
-	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
-	skb->protocol = htons(ETH_P_IPV6);
 	skb->local_df = 1;
 
 	return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
 
 int xfrm6_output_finish(struct sk_buff *skb)
 {
+	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+	skb->protocol = htons(ETH_P_IPV6);
+
 #ifdef CONFIG_NETFILTER
 	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 #endif
 
-	skb->protocol = htons(ETH_P_IPV6);
 	return xfrm_output(skb);
 }
 
|
||||
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
|
||||
struct xfrm_state *x = dst->xfrm;
|
||||
int mtu;
|
||||
|
||||
#ifdef CONFIG_NETFILTER
|
||||
if (!x) {
|
||||
IP6CB(skb)->flags |= IP6SKB_REROUTED;
|
||||
return dst_output(skb);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (skb->protocol == htons(ETH_P_IPV6))
|
||||
mtu = ip6_skb_dst_mtu(skb);
|
||||
else
|
||||
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
|
||||
|
||||
int xfrm6_output(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
|
||||
skb_dst(skb)->dev, __xfrm6_output);
|
||||
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
|
||||
NULL, skb_dst(skb)->dev, __xfrm6_output,
|
||||
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
|
||||
}
|
||||
|
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
 {
 	int ret;
 	struct xfrm6_protocol *handler;
+	struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+	if (!head)
+		return 0;
 
 	for_each_protocol_rcu(*proto_handlers(protocol), handler)
 		if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
 	struct xfrm6_protocol __rcu **pprev;
 	struct xfrm6_protocol *t;
 	bool add_netproto = false;
-
 	int ret = -EEXIST;
 	int priority = handler->priority;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm6_protocol_mutex);
 
 	if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
 	struct xfrm6_protocol *t;
 	int ret = -ENOENT;
 
+	if (!proto_handlers(protocol) || !netproto(protocol))
+		return -EINVAL;
+
 	mutex_lock(&xfrm6_protocol_mutex);
 
 	for (pprev = proto_handlers(protocol);
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	spin_lock_irqsave(&list->lock, flags);
 
 	while (list_skb != (struct sk_buff *)list) {
-		if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
 			this = list_skb;
 			break;
 		}
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
 
 	bool started, abort, hw_begun, notified;
 	bool to_be_freed;
+	bool on_channel;
 
 	unsigned long hw_start_time;
 
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
 
 	sdata_lock(sdata);
 
-	if (ifmgd->auth_data) {
+	if (ifmgd->auth_data || ifmgd->assoc_data) {
+		const u8 *bssid = ifmgd->auth_data ?
+				ifmgd->auth_data->bss->bssid :
+				ifmgd->assoc_data->bss->bssid;
+
 		/*
-		 * If we are trying to authenticate while suspending, cfg80211
-		 * won't know and won't actually abort those attempts, thus we
-		 * need to do that ourselves.
+		 * If we are trying to authenticate / associate while suspending,
+		 * cfg80211 won't know and won't actually abort those attempts,
+		 * thus we need to do that ourselves.
 		 */
-		ieee80211_send_deauth_disassoc(sdata,
-					       ifmgd->auth_data->bss->bssid,
+		ieee80211_send_deauth_disassoc(sdata, bssid,
 					       IEEE80211_STYPE_DEAUTH,
 					       WLAN_REASON_DEAUTH_LEAVING,
 					       false, frame_buf);
-		ieee80211_destroy_auth_data(sdata, false);
+		if (ifmgd->assoc_data)
+			ieee80211_destroy_assoc_data(sdata, false);
+		if (ifmgd->auth_data)
+			ieee80211_destroy_auth_data(sdata, false);
 		cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
 				      IEEE80211_DEAUTH_FRAME_LEN);
 	}
 
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 		container_of(work, struct ieee80211_roc_work, work.work);
 	struct ieee80211_sub_if_data *sdata = roc->sdata;
 	struct ieee80211_local *local = sdata->local;
-	bool started;
+	bool started, on_channel;
 
 	mutex_lock(&local->mtx);
 
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 	if (!roc->started) {
 		struct ieee80211_roc_work *dep;
 
-		/* start this ROC */
-		ieee80211_offchannel_stop_vifs(local);
+		WARN_ON(local->use_chanctx);
+
+		/* If actually operating on the desired channel (with at least
+		 * 20 MHz channel width) don't stop all the operations but still
+		 * treat it as though the ROC operation started properly, so
+		 * other ROC operations won't interfere with this one.
+		 */
+		roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
 
-		/* switch channel etc */
+		/* start this ROC */
 		ieee80211_recalc_idle(local);
 
-		local->tmp_channel = roc->chan;
-		ieee80211_hw_config(local, 0);
+		if (!roc->on_channel) {
+			ieee80211_offchannel_stop_vifs(local);
+
+			local->tmp_channel = roc->chan;
+			ieee80211_hw_config(local, 0);
+		}
 
 		/* tell userspace or send frame */
 		ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 finish:
 		list_del(&roc->list);
 		started = roc->started;
+		on_channel = roc->on_channel;
 		ieee80211_roc_notify_destroy(roc, !roc->abort);
 
-		if (started) {
+		if (started && !on_channel) {
 			ieee80211_flush_queues(local, NULL);
 
 			local->tmp_channel = NULL;
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
 		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
 			sta->last_rx = jiffies;
-			if (ieee80211_is_data(hdr->frame_control)) {
+			if (ieee80211_is_data(hdr->frame_control) &&
+			    !is_multicast_ether_addr(hdr->addr1)) {
 				sta->last_rx_rate_idx = status->rate_idx;
 				sta->last_rx_rate_flag = status->flag;
 				sta->last_rx_rate_vht_flag = status->vht_flag;
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
 	atomic_dec(&ps->num_sta_ps);
 
 	/* This station just woke up and isn't aware of our SMPS state */
-	if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+	    !ieee80211_smps_is_restrictive(sta->known_smps_mode,
 					   sdata->smps_mode) &&
 	    sta->known_smps_mode != sdata->bss->req_smps &&
 	    sta_info_tx_streams(sta) != 1) {
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
 	    !is_multicast_ether_addr(hdr->addr1))
 		txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-	if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-	    (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
 		txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-	else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
 		txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
 	put_unaligned_le16(txflags, pos);
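Aside: the bug fixed here is a classic if/else-if misuse. RTS and CTS-to-self protection are independent inputs, but the old chain tested RTS first and mapped it to the CTS bit, so RTS-protected frames were mislabeled and the RTS radiotap bit could never be set. A hedged standalone restatement (flag values are made up, though they mirror radiotap's):

#include <stdint.h>

/* Toy restatement, not the kernel function: test each rate-control flag
 * independently and map it 1:1 onto its radiotap TX flag. */
#define RC_USE_RTS_CTS     0x01
#define RC_USE_CTS_PROTECT 0x02
#define RADIOTAP_TX_CTS    0x0002
#define RADIOTAP_TX_RTS    0x0004

static uint16_t prot_txflags(uint8_t rc_flags)
{
	uint16_t txflags = 0;

	if (rc_flags & RC_USE_CTS_PROTECT)
		txflags |= RADIOTAP_TX_CTS;	/* CTS-to-self used */
	if (rc_flags & RC_USE_RTS_CTS)
		txflags |= RADIOTAP_TX_RTS;	/* RTS/CTS handshake used */
	return txflags;
}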
@@ -21,10 +21,10 @@
 
 #define VIF_ENTRY	__field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
 			__field(bool, p2p) \
-			__string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+			__string(vif_name, sdata->name)
 #define VIF_ASSIGN	__entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
 			__entry->p2p = sdata->vif.p2p; \
-			__assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+			__assign_str(vif_name, sdata->name)
 #define VIF_PR_FMT	" vif:%s(%d%s)"
 #define VIF_PR_ARG	__get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
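Aside: these helper macros are consumed by TRACE_EVENT definitions. A hedged sketch of such a consumer (the event name is hypothetical, not part of this commit): __string() sizes a dynamic string slot when the event fires and __assign_str() fills it, so the expression must be valid on every firing — sdata->name always is, while sdata->dev can be NULL for device-less interfaces, which is why the definition above switches to sdata->name.

/* Illustrative tracepoint using the macros from this hunk. */
TRACE_EVENT(demo_vif_event,
	TP_PROTO(struct ieee80211_sub_if_data *sdata),
	TP_ARGS(sdata),
	TP_STRUCT__entry(VIF_ENTRY),
	TP_fast_assign(VIF_ASSIGN),
	TP_printk(VIF_PR_FMT, VIF_PR_ARG)
);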
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 	mutex_unlock(&local->mtx);
 
 	if (sched_scan_stopped)
-		cfg80211_sched_scan_stopped(local->hw.wiphy);
+		cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
 	/*
 	 * If this is for hw restart things are still running.
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 	if (!vht_cap_ie || !sband->vht_cap.vht_supported)
 		return;
 
-	/* A VHT STA must support 40 MHz */
-	if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-		return;
+	/*
+	 * A VHT STA must support 40 MHz, but if we verify that here
+	 * then we break a few things - some APs (e.g. Netgear R6300v2
+	 * and others based on the BCM4360 chipset) will unset this
+	 * capability bit when operating in 20 MHz.
+	 */
 
 	vht_cap->vht_supported = true;
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
 	int ret;
 
+	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+		return 0;
+
 	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
 					cda[CTA_NAT_DST]);
 	if (ret < 0)
@@ -66,20 +66,6 @@ struct nft_jumpstack {
 	int rulenum;
 };
 
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
-		struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
-	struct nft_stats __percpu *stats;
-	const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
-	rcu_read_lock_bh();
-	stats = rcu_dereference(nft_base_chain(chain)->stats);
-	__this_cpu_inc(stats->pkts);
-	__this_cpu_add(stats->bytes, pkt->skb->len);
-	rcu_read_unlock_bh();
-}
-
 enum nft_trace {
 	NFT_TRACE_RULE,
 	NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
 unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
-	const struct nft_chain *chain = ops->priv;
+	const struct nft_chain *chain = ops->priv, *basechain = chain;
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
 	struct nft_data data[NFT_REG_MAX + 1];
 	unsigned int stackptr = 0;
 	struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-	int rulenum = 0;
+	struct nft_stats __percpu *stats;
+	int rulenum;
 	/*
 	 * Cache cursor to avoid problems in case that the cursor is updated
 	 * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 	unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
 
 do_chain:
+	rulenum = 0;
 	rule = list_entry(&chain->rules, struct nft_rule, list);
 next_rule:
 	data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
 		switch (data[NFT_REG_VERDICT].verdict) {
 		case NFT_BREAK:
 			data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
-			/* fall through */
+			continue;
 		case NFT_CONTINUE:
+			if (unlikely(pkt->skb->nf_trace))
+				nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
 			continue;
 		}
 		break;
@@ -183,37 +173,44 @@ next_rule:
 		jumpstack[stackptr].rule = rule;
 		jumpstack[stackptr].rulenum = rulenum;
 		stackptr++;
-		/* fall through */
+		chain = data[NFT_REG_VERDICT].chain;
+		goto do_chain;
 	case NFT_GOTO:
+		if (unlikely(pkt->skb->nf_trace))
+			nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
 		chain = data[NFT_REG_VERDICT].chain;
 		goto do_chain;
 	case NFT_RETURN:
 		if (unlikely(pkt->skb->nf_trace))
 			nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-		/* fall through */
+		break;
 	case NFT_CONTINUE:
-		if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
-			nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
 		break;
 	default:
 		WARN_ON(1);
 	}
 
 	if (stackptr > 0) {
 		if (unlikely(pkt->skb->nf_trace))
 			nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
 
 		stackptr--;
 		chain = jumpstack[stackptr].chain;
 		rule = jumpstack[stackptr].rule;
 		rulenum = jumpstack[stackptr].rulenum;
 		goto next_rule;
 	}
-	nft_chain_stats(chain, pkt, jumpstack, stackptr);
 
 	if (unlikely(pkt->skb->nf_trace))
-		nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+		nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
-	return nft_base_chain(chain)->policy;
+	rcu_read_lock_bh();
+	stats = rcu_dereference(nft_base_chain(basechain)->stats);
+	__this_cpu_inc(stats->pkts);
+	__this_cpu_add(stats->bytes, pkt->skb->len);
+	rcu_read_unlock_bh();
+
+	return nft_base_chain(basechain)->policy;
 }
 EXPORT_SYMBOL_GPL(nft_do_chain);
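Aside: the control flow in nft_do_chain is easiest to see in isolation. A hedged toy model of the jump-stack traversal follows (plain userspace C, not the kernel implementation): entering a chain via "jump" pushes the return position, and falling off the end of a chain pops it, so evaluation resumes right after the jumping rule. Note that `rule = 0` on chain entry mirrors the `rulenum = 0;` line added at the do_chain: label above, and the popped frame restores the caller's counter.

#include <stdio.h>

#define STACK_SIZE 16	/* stands in for NFT_JUMP_STACK_SIZE */

struct frame { int chain; int rule; };

static void run_chain(int chain)
{
	struct frame stack[STACK_SIZE];
	unsigned int sp = 0;
	int rule = 0;

	for (;;) {
		printf("chain %d, rule %d\n", chain, rule);

		if (chain == 0 && rule == 1 && sp < STACK_SIZE) {
			/* "jump": remember where to resume, then restart
			 * rule numbering in the target chain */
			stack[sp].chain = chain;
			stack[sp].rule = rule;
			sp++;
			chain = 1;
			rule = 0;
			continue;
		}

		if (++rule < 3)	/* pretend every chain has 3 rules */
			continue;

		if (sp > 0) {	/* implicit return: pop and resume */
			sp--;
			chain = stack[sp].chain;
			rule = stack[sp].rule + 1;
			continue;
		}
		break;	/* base chain exhausted: apply its policy */
	}
}

int main(void)
{
	run_chain(0);
	return 0;
}

Keeping a `basechain` pointer from the start, as the diff does, means the final policy and the per-chain counters are always charged to the base chain, no matter how deep the jumps went.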
@@ -256,15 +256,15 @@ replay:
 #endif
 		{
 			nfnl_unlock(subsys_id);
-			kfree_skb(nskb);
-			return netlink_ack(skb, nlh, -EOPNOTSUPP);
+			netlink_ack(skb, nlh, -EOPNOTSUPP);
+			return kfree_skb(nskb);
 		}
 	}
 
 	if (!ss->commit || !ss->abort) {
 		nfnl_unlock(subsys_id);
-		kfree_skb(nskb);
-		return netlink_ack(skb, nlh, -EOPNOTSUPP);
+		netlink_ack(skb, nlh, -EOPNOTSUPP);
+		return kfree_skb(skb);
 	}
 
 	while (skb->len >= nlmsg_total_size(0)) {
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
 	_debug("tktlen: %x", tktlen);
 	if (tktlen > AFSTOKEN_RK_TIX_MAX)
 		return -EKEYREJECTED;
-	if (8 * 4 + tktlen != toklen)
+	if (toklen < 8 * 4 + tktlen)
 		return -EKEYREJECTED;
 
 	plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
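Aside: the change from `!=` to `<` relaxes an exact-size requirement into a truncation check, so token blobs that carry additional data after the ticket are no longer rejected. A hedged toy restatement of the principle (not the rxrpc parser):

#include <stddef.h>

/* Toy illustration: when parsing a length-prefixed field out of a larger
 * buffer, reject only buffers too short to hold the field.  Demanding an
 * exact total size, as the removed check did, breaks callers that
 * legitimately append further data after the field. */
static int check_field(size_t header_len, size_t field_len, size_t buf_len)
{
	if (buf_len < header_len + field_len)
		return -1;	/* truncated: the field cannot fit */
	return 0;		/* anything extra is the caller's business */
}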
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
 	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
 };
 
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+	memset(r, 0, sizeof(*r));
+	tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
 static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		  u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		return err;
 
 	memcpy(&cp, p, sizeof(cp));
-	memset(&new_filter_result, 0, sizeof(new_filter_result));
-	tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+	tcindex_filter_result_init(&new_filter_result);
 
+	tcindex_filter_result_init(&cr);
 	if (old_r)
-		memcpy(&cr, r, sizeof(cr));
-	else {
-		memset(&cr, 0, sizeof(cr));
-		tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-	}
+		cr.res = r->res;
 
 	if (tb[TCA_TCINDEX_HASH])
 		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 	err = -ENOMEM;
 	if (!cp.perfect && !cp.h) {
 		if (valid_perfect_hash(&cp)) {
+			int i;
+
 			cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
 			if (!cp.perfect)
 				goto errout;
+			for (i = 0; i < cp.hash; i++)
+				tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+					      TCA_TCINDEX_POLICE);
 			balloc = 1;
 		} else {
 			cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 		tcf_bind_filter(tp, &cr.res, base);
 	}
 
-	tcf_exts_change(tp, &cr.exts, &e);
+	if (old_r)
+		tcf_exts_change(tp, &r->exts, &e);
+	else
+		tcf_exts_change(tp, &cr.exts, &e);
 
 	tcf_tree_lock(tp);
 	if (old_r && old_r != r)
-		memset(old_r, 0, sizeof(*old_r));
+		tcindex_filter_result_init(old_r);
 	memcpy(p, &cp, sizeof(cp));
-	memcpy(r, &cr, sizeof(cr));
+	r->res = cr.res;
 
 	if (r == &new_filter_result) {
 		struct tcindex_filter **fp;
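Aside: the tcindex changes all revolve around one rule: zeroed memory is not initialized memory when a struct embeds something like a list head that must be set up explicitly — hence the new tcindex_filter_result_init() helper and the loop over the kcalloc'd perfect-hash array. A hedged toy version of the pattern (plain userspace C, hypothetical names):

#include <stdlib.h>
#include <string.h>

struct result {
	struct result *next, *prev;	/* circular list head */
	int classid;
};

static void result_init(struct result *r)
{
	memset(r, 0, sizeof(*r));
	r->next = r->prev = r;		/* an empty list points at itself */
}

static struct result *alloc_results(size_t n)
{
	struct result *arr = calloc(n, sizeof(*arr));

	if (arr)
		for (size_t i = 0; i < n; i++)
			result_init(&arr[i]);	/* calloc's zeroing is not enough */
	return arr;
}

Routing every construction site through one init helper, as the diff does, keeps the paths from drifting apart again.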
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	ASSERT_RTNL();
+
 	trace_cfg80211_sched_scan_stopped(wiphy);
 
-	rtnl_lock();
 	__cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+	rtnl_lock();
+	cfg80211_sched_scan_stopped_rtnl(wiphy);
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
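Aside: this is the standard locked/unlocked API-pair pattern. The _rtnl variant asserts the lock and does the work; the plain variant takes the lock and delegates, so callers that already hold the RTNL (like ieee80211_reconfig above) can't deadlock by locking twice. A hedged toy version with pthreads (names are illustrative):

#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void stop_scan_locked(void)	/* caller must hold big_lock */
{
	/* ... do the actual teardown here ... */
}

static void stop_scan(void)		/* caller must NOT hold big_lock */
{
	pthread_mutex_lock(&big_lock);
	stop_scan_locked();
	pthread_mutex_unlock(&big_lock);
}

The kernel adds ASSERT_RTNL() in the locked variant so a caller that forgets the lock fails loudly rather than racing silently.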
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
 					    NULL, 0, NULL, 0,
 					    WLAN_STATUS_UNSPECIFIED_FAILURE,
 					    false, NULL);
-			cfg80211_sme_free(wdev);
 		}
 		wdev_unlock(wdev);
 	}
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 		cfg80211_unhold_bss(bss_from_pub(bss));
 		cfg80211_put_bss(wdev->wiphy, bss);
 	}
+	cfg80211_sme_free(wdev);
 	return;
 }