Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking updates from David Miller:

 1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov.

 2) Multiqueue support in xen-netback and xen-netfront, from Andrew J.
    Bennieston.

 3) Allow tweaking of aggregation settings in cdc_ncm driver, from Bjørn
    Mork.

 4) BPF now has a "random" opcode, from Chema Gonzalez.

 5) Add more BPF documentation and improve test framework, from Daniel
    Borkmann.

 6) Support TCP fastopen over IPv6, from Daniel Lee.

 7) Add software TSO helper functions and use them to support software
    TSO in mvneta and mv643xx_eth drivers (see the sketch after this
    list).  From Ezequiel Garcia.

 8) Support software TSO in fec driver too, from Nimrod Andy.

 9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli.

10) Handle broadcasts more gracefully over macvlan when there are large
    numbers of interfaces configured, from Herbert Xu.

11) Allow more control over fwmark used for non-socket based responses,
    from Lorenzo Colitti.

12) Do TCP congestion window limiting based upon measurements, from Neal
    Cardwell.

13) Support busy polling in SCTP, from Neil Horman.

14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru.

15) Bridge promisc mode handling improvements from Vlad Yasevich.

16) Don't use inetpeer entries to implement ID generation any more, as
    it performs poorly, from Eric Dumazet.
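
A rough sketch of the driver-side pattern behind item 7's software TSO
helpers, assuming the tso_count_descs()/tso_start()/tso_build_hdr()/
tso_build_data() API added by this merge; txq and the txq_free_descs(),
txq_hdr_buf(), queue_hdr_desc() and queue_data_desc() calls are
hypothetical driver hooks, with DMA mapping and descriptor bookkeeping
elided:

    struct tso_t tso;
    int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
    int total_len = skb->len - hdr_len;

    /* Reserve descriptors for the whole GSO skb up front. */
    if (tso_count_descs(skb) > txq_free_descs(txq))
        return NETDEV_TX_BUSY;

    tso_start(skb, &tso);
    while (total_len > 0) {
        int data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
        char *hdr = txq_hdr_buf(txq);   /* per-segment header buffer */

        total_len -= data_left;
        /* Emit a fresh copy of the protocol headers for this segment. */
        tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
        queue_hdr_desc(txq, hdr, hdr_len);

        /* Emit payload descriptors, walking through the skb frags. */
        while (data_left > 0) {
            int size = min_t(int, tso.size, data_left);

            queue_data_desc(txq, tso.data, size);
            data_left -= size;
            tso_build_data(skb, &tso, size);
        }
    }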

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
  rtnetlink: fix userspace API breakage for iproute2 < v3.9.0
  tcp: fixing TLP's FIN recovery
  net: fec: Add software TSO support
  net: fec: Add Scatter/gather support
  net: fec: Increase buffer descriptor entry number
  net: fec: Factorize feature setting
  net: fec: Enable IP header hardware checksum
  net: fec: Factorize the .xmit transmit function
  bridge: fix compile error when compiling without IPv6 support
  bridge: fix smatch warning / potential null pointer dereference
  via-rhine: fix full-duplex with autoneg disable
  bnx2x: Enlarge the dorq threshold for VFs
  bnx2x: Check for UNDI in uncommon branch
  bnx2x: Fix 1G-baseT link
  bnx2x: Fix link for KR with swapped polarity lane
  sctp: Fix sk_ack_backlog wrap-around problem
  net/core: Add VF link state control policy
  net/fsl: xgmac_mdio is dependent on OF_MDIO
  net/fsl: Make xgmac_mdio read error message useful
  net_sched: drr: warn when qdisc is not work conserving
  ...
Committer: Linus Torvalds
Date: 2014-06-12 14:27:40 -07:00

1265 changed files with 61688 additions and 23103 deletions

View File

@@ -63,7 +63,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
}
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev,
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
__be16 vlan_proto, u16 vlan_id)
{
struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
@@ -81,13 +81,13 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
upper_dev = netdev_master_upper_dev_get_rcu(dev);
if (upper_dev)
return __vlan_find_dev_deep(upper_dev,
return __vlan_find_dev_deep_rcu(upper_dev,
vlan_proto, vlan_id);
}
return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{

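The rename only adds an _rcu suffix, but it makes the existing locking
contract visible at every call site. A minimal caller sketch
(illustrative, not from this series; real_dev and vlan_id stand in for
the caller's context):

    struct net_device *vlan_dev;

    rcu_read_lock();
    vlan_dev = __vlan_find_dev_deep_rcu(real_dev, htons(ETH_P_8021Q),
                                        vlan_id);
    if (vlan_dev)
        dev_hold(vlan_dev);  /* pin it before leaving the RCU section */
    rcu_read_unlock();
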
View File

@@ -643,9 +643,9 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
netdev_features_t old_features = features;
features &= real_dev->vlan_features;
features = netdev_intersect_features(features, real_dev->vlan_features);
features |= NETIF_F_RXCSUM;
features &= real_dev->features;
features = netdev_intersect_features(features, real_dev->features);
features |= old_features & NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
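
A plain "&" of feature masks can lose capabilities that the two sides
express differently, e.g. a real device advertising NETIF_F_GEN_CSUM
intersected with a mask that carries only NETIF_F_IP_CSUM.
netdev_intersect_features() compensates for that; roughly (a sketch of
the helper's intent, not a verbatim copy):

    static inline netdev_features_t
    netdev_intersect_features(netdev_features_t f1, netdev_features_t f2)
    {
        /* A side that can checksum anything (GEN_CSUM) implicitly
         * covers the protocol-specific checksum bits, so expand
         * before ANDing.
         */
        if (f1 & NETIF_F_GEN_CSUM)
            f1 |= NETIF_F_ALL_CSUM;
        if (f2 & NETIF_F_GEN_CSUM)
            f2 |= NETIF_F_ALL_CSUM;
        return f1 & f2;
    }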
@@ -671,38 +671,36 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct vlan_pcpu_stats *p;
u32 rx_errors = 0, tx_dropped = 0;
int i;
if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
struct vlan_pcpu_stats *p;
u32 rx_errors = 0, tx_dropped = 0;
int i;
for_each_possible_cpu(i) {
u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
for_each_possible_cpu(i) {
u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
rxmulticast = p->rx_multicast;
txpackets = p->tx_packets;
txbytes = p->tx_bytes;
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
rxmulticast = p->rx_multicast;
txpackets = p->tx_packets;
txbytes = p->tx_bytes;
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rxpackets;
stats->rx_bytes += rxbytes;
stats->multicast += rxmulticast;
stats->tx_packets += txpackets;
stats->tx_bytes += txbytes;
/* rx_errors & tx_dropped are u32 */
rx_errors += p->rx_errors;
tx_dropped += p->tx_dropped;
}
stats->rx_errors = rx_errors;
stats->tx_dropped = tx_dropped;
stats->rx_packets += rxpackets;
stats->rx_bytes += rxbytes;
stats->multicast += rxmulticast;
stats->tx_packets += txpackets;
stats->tx_bytes += txbytes;
/* rx_errors & tx_dropped are u32 */
rx_errors += p->rx_errors;
tx_dropped += p->tx_dropped;
}
stats->rx_errors = rx_errors;
stats->tx_dropped = tx_dropped;
return stats;
}

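For context, the writer side that this fetch/retry loop pairs with
looks like the following on the vlan hot path (a sketch; the real
update sites live in vlan_do_receive() and the xmit path):

    struct vlan_pcpu_stats *stats;

    stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
    u64_stats_update_begin(&stats->syncp);
    stats->rx_packets++;
    stats->rx_bytes += skb->len;
    u64_stats_update_end(&stats->syncp);
    /* rx_errors and tx_dropped are plain u32 counters, which is why
     * the reader accumulates them outside the seqcount loop above.
     */
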
View File

@@ -1669,7 +1669,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
goto out;
}
if (sk->sk_no_check == 1)
if (sk->sk_no_check_tx)
ddp->deh_sum = 0;
else
ddp->deh_sum = atalk_checksum(skb, len + sizeof(*ddp));

View File

@@ -263,17 +263,11 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
goto out;
}
}
/*
* Not supported yet
*
* #ifndef CONFIG_SINGLE_SIGITF
*/
vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
vcc->qos.txtp.pcr = 0;
vcc->qos.txtp.min_pcr = 0;
/*
* #endif
*/
error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
if (!error)
sock->state = SS_CONNECTED;

View File

@@ -245,6 +245,7 @@ static int batadv_algorithms_open(struct inode *inode, struct file *file)
static int batadv_originators_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_orig_seq_print_text, net_dev);
}
@@ -258,18 +259,21 @@ static int batadv_originators_hardif_open(struct inode *inode,
struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_orig_hardif_seq_print_text, net_dev);
}
static int batadv_gateways_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_gw_client_seq_print_text, net_dev);
}
static int batadv_transtable_global_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_tt_global_seq_print_text, net_dev);
}
@@ -277,6 +281,7 @@ static int batadv_transtable_global_open(struct inode *inode, struct file *file)
static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_bla_claim_table_seq_print_text,
net_dev);
}
@@ -285,6 +290,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_bla_backbone_table_seq_print_text,
net_dev);
}
@@ -300,6 +306,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
static int batadv_dat_cache_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
}
#endif
@@ -307,6 +314,7 @@ static int batadv_dat_cache_open(struct inode *inode, struct file *file)
static int batadv_transtable_local_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_tt_local_seq_print_text, net_dev);
}
@@ -319,6 +327,7 @@ struct batadv_debuginfo {
static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
}
#endif
@@ -333,7 +342,7 @@ struct batadv_debuginfo batadv_debuginfo_##_name = { \
.llseek = seq_lseek, \
.release = single_release, \
} \
};
}
/* the following attributes are general and therefore they will be directly
* placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
@@ -395,7 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
.llseek = seq_lseek, \
.release = single_release, \
}, \
};
}
static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
batadv_originators_hardif_open);

View File

@@ -594,7 +594,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
if (!neigh_node)
goto free_orig;
tmp_skb = pskb_copy(skb, GFP_ATOMIC);
tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
cand[i].orig_node,
packet_subtype)) {
@@ -662,6 +662,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
void batadv_dat_status_update(struct net_device *net_dev)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
batadv_dat_tvlv_container_update(bat_priv);
}

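pskb_copy_for_clone() is new in this merge: it is meant for callers
whose private copy is about to be cloned again further down the path
(as presumably happens to tmp_skb here), so the head can be allocated
fclone-friendly up front. Assuming the include/linux/skbuff.h wrappers
as they landed (worth double-checking against the tree):

    static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
                                                      gfp_t gfp_mask)
    {
        /* Same as pskb_copy(), except the final 'true' requests an
         * fclone-friendly allocation because the copy will be cloned.
         */
        return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
    }
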
View File

@@ -24,7 +24,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
#define BATADV_SOURCE_VERSION "2014.2.0"
#define BATADV_SOURCE_VERSION "2014.3.0"
#endif
/* B.A.T.M.A.N. parameters */

View File

@@ -86,6 +86,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
void batadv_nc_status_update(struct net_device *net_dev)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
batadv_nc_tvlv_container_update(bat_priv);
}
@@ -1343,7 +1344,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
struct ethhdr *ethhdr;
/* Copy skb header to change the mac header */
skb = pskb_copy(skb, GFP_ATOMIC);
skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
if (!skb)
return;

View File

@@ -884,7 +884,7 @@ static void batadv_softif_init_early(struct net_device *dev)
/* generate random address */
eth_hw_addr_random(dev);
SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
dev->ethtool_ops = &batadv_ethtool_ops;
memset(priv, 0, sizeof(*priv));
}

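This is one instance of a tree-wide cleanup: SET_ETHTOOL_OPS was a
trivial wrapper being removed in favor of open-coded assignment. The
macro amounted to the following (its definition in
include/linux/netdevice.h prior to removal):

    #define SET_ETHTOOL_OPS(netdev, ops) \
        ((netdev)->ethtool_ops = (ops))
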
View File

@@ -29,12 +29,14 @@
static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
{
struct device *dev = container_of(obj->parent, struct device, kobj);
return to_net_dev(dev);
}
static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
{
struct net_device *net_dev = batadv_kobj_to_netdev(obj);
return netdev_priv(net_dev);
}
@@ -106,7 +108,7 @@ struct batadv_attribute batadv_attr_vlan_##_name = { \
.mode = _mode }, \
.show = _show, \
.store = _store, \
};
}
/* Use this, if you have customized show and store functions */
#define BATADV_ATTR(_name, _mode, _show, _store) \
@@ -115,7 +117,7 @@ struct batadv_attribute batadv_attr_##_name = { \
.mode = _mode }, \
.show = _show, \
.store = _store, \
};
}
#define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
ssize_t batadv_store_##_name(struct kobject *kobj, \
@@ -124,6 +126,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
{ \
struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
struct batadv_priv *bat_priv = netdev_priv(net_dev); \
\
return __batadv_store_bool_attr(buff, count, _post_func, attr, \
&bat_priv->_name, net_dev); \
}
@@ -133,6 +136,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
struct attribute *attr, char *buff) \
{ \
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
\
return sprintf(buff, "%s\n", \
atomic_read(&bat_priv->_name) == 0 ? \
"disabled" : "enabled"); \
@@ -155,6 +159,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
{ \
struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
struct batadv_priv *bat_priv = netdev_priv(net_dev); \
\
return __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
&bat_priv->_name, net_dev); \
@@ -165,6 +170,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
struct attribute *attr, char *buff) \
{ \
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
\
return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
} \
@@ -188,6 +194,7 @@ ssize_t batadv_store_vlan_##_name(struct kobject *kobj, \
size_t res = __batadv_store_bool_attr(buff, count, _post_func, \
attr, &vlan->_name, \
bat_priv->soft_iface); \
\
batadv_softif_vlan_free_ref(vlan); \
return res; \
}
@@ -202,6 +209,7 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj, \
size_t res = sprintf(buff, "%s\n", \
atomic_read(&vlan->_name) == 0 ? \
"disabled" : "enabled"); \
\
batadv_softif_vlan_free_ref(vlan); \
return res; \
}
@@ -324,12 +332,14 @@ static ssize_t batadv_show_bat_algo(struct kobject *kobj,
struct attribute *attr, char *buff)
{
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
}
static void batadv_post_gw_reselect(struct net_device *net_dev)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
batadv_gw_reselect(bat_priv);
}

View File

@@ -420,12 +420,18 @@ static int conn_send(struct l2cap_conn *conn,
return 0;
}
static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
bdaddr_t *addr, u8 *addr_type)
static u8 get_addr_type_from_eui64(u8 byte)
{
u8 *eui64;
/* Is universal(0) or local(1) bit, */
if (byte & 0x02)
return ADDR_LE_DEV_RANDOM;
eui64 = ip6_daddr->s6_addr + 8;
return ADDR_LE_DEV_PUBLIC;
}
static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
{
u8 *eui64 = ip6_daddr->s6_addr + 8;
addr->b[0] = eui64[7];
addr->b[1] = eui64[6];
@@ -433,16 +439,19 @@ static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
addr->b[3] = eui64[2];
addr->b[4] = eui64[1];
addr->b[5] = eui64[0];
}
addr->b[5] ^= 2;
static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
bdaddr_t *addr, u8 *addr_type)
{
copy_to_bdaddr(ip6_daddr, addr);
/* Set universal/local bit to 0 */
if (addr->b[5] & 1) {
addr->b[5] &= ~1;
*addr_type = ADDR_LE_DEV_PUBLIC;
} else {
*addr_type = ADDR_LE_DEV_RANDOM;
}
/* We need to toggle the U/L bit that we got from IPv6 address
* so that we get the proper address and type of the BD address.
*/
addr->b[5] ^= 0x02;
*addr_type = get_addr_type_from_eui64(addr->b[5]);
}
static int header_create(struct sk_buff *skb, struct net_device *netdev,
@@ -473,9 +482,11 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
/* Get destination BT device from skb.
* If there is no such peer then discard the packet.
*/
get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
BT_DBG("dest addr %pMR type %d", &addr, addr_type);
BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
&hdr->daddr);
read_lock_irqsave(&devices_lock, flags);
peer = peer_lookup_ba(dev, &addr, addr_type);
@@ -556,7 +567,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
} else {
unsigned long flags;
get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
dev = lowpan_dev(netdev);
@@ -564,8 +575,10 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
peer = peer_lookup_ba(dev, &addr, addr_type);
read_unlock_irqrestore(&devices_lock, flags);
BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
&addr, &lowpan_cb(skb)->addr, peer);
BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
netdev->name, &addr,
addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
&lowpan_cb(skb)->addr, peer);
if (peer && peer->conn)
err = send_pkt(peer->conn, netdev->dev_addr,
@@ -620,13 +633,13 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
eui[6] = addr[1];
eui[7] = addr[0];
eui[0] ^= 2;
/* Universal/local bit set, RFC 4291 */
/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
if (addr_type == ADDR_LE_DEV_PUBLIC)
eui[0] |= 1;
eui[0] &= ~0x02;
else
eui[0] &= ~1;
eui[0] |= 0x02;
BT_DBG("type %d addr %*phC", addr_type, 8, eui);
}
static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
@@ -634,7 +647,6 @@ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
{
netdev->addr_assign_type = NET_ADDR_PERM;
set_addr(netdev->dev_addr, addr->b, addr_type);
netdev->dev_addr[0] ^= 2;
}
static void ifup(struct net_device *netdev)
@@ -684,13 +696,6 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
EUI64_ADDR_LEN);
peer->eui64_addr[0] ^= 2; /* second bit-flip (Universe/Local)
* is done according RFC2464
*/
raw_dump_inline(__func__, "peer IPv6 address",
(unsigned char *)&peer->peer_addr, 16);
raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
write_lock_irqsave(&devices_lock, flags);
INIT_LIST_HEAD(&peer->list);

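A worked example of the conversion above, with made-up illustrative
values:

    /* Modified EUI-64 interface identifier from the low 64 bits of the
     * link-local IPv6 address: 0x02 in byte 0 marks "universal" per
     * RFC 4291, and the 0xff, 0xfe in the middle is the filler that
     * expands a 48-bit BD address to 64 bits.
     */
    u8 eui64[8] = { 0x02, 0x1b, 0xdc, 0xff, 0xfe, 0x12, 0x34, 0x56 };
    struct in6_addr ip6 = IN6ADDR_ANY_INIT;
    bdaddr_t addr;
    u8 addr_type;

    memcpy(ip6.s6_addr + 8, eui64, sizeof(eui64));
    convert_dest_bdaddr(&ip6, &addr, &addr_type);
    /* copy_to_bdaddr() reverses the bytes and drops the ff:fe filler,
     * then the U/L bit is toggled back: the result is the public BD
     * address 00:1b:dc:12:34:56 with addr_type == ADDR_LE_DEV_PUBLIC.
     */
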
View File

@@ -28,6 +28,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include "smp.h"
#include "a2mp.h"
@@ -367,9 +368,23 @@ static void le_conn_timeout(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
le_conn_timeout.work);
struct hci_dev *hdev = conn->hdev;
BT_DBG("");
/* We could end up here due to having done directed advertising,
* so clean up the state if necessary. This should however only
* happen with broken hardware or if low duty cycle was used
* (which doesn't have a timeout of its own).
*/
if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
u8 enable = 0x00;
hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
&enable);
hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
return;
}
hci_le_create_connection_cancel(conn);
}
@@ -393,6 +408,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
conn->io_capability = hdev->io_capability;
conn->remote_auth = 0xff;
conn->key_type = 0xff;
conn->tx_power = HCI_TX_POWER_INVALID;
conn->max_tx_power = HCI_TX_POWER_INVALID;
set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -401,6 +418,10 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
case ACL_LINK:
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
break;
case LE_LINK:
/* conn->src should reflect the local identity address */
hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
break;
case SCO_LINK:
if (lmp_esco_capable(hdev))
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
@@ -545,6 +566,11 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
* favor of connection establishment, we should restart it.
*/
hci_update_background_scan(hdev);
/* Re-enable advertising in case this was a failed connection
* attempt as a peripheral.
*/
mgmt_reenable_advertising(hdev);
}
static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
@@ -605,6 +631,45 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
conn->state = BT_CONNECT;
}
static void hci_req_directed_advertising(struct hci_request *req,
struct hci_conn *conn)
{
struct hci_dev *hdev = req->hdev;
struct hci_cp_le_set_adv_param cp;
u8 own_addr_type;
u8 enable;
enable = 0x00;
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Clear the HCI_ADVERTISING bit temporarily so that the
* hci_update_random_address knows that it's safe to go ahead
* and write a new random address. The flag will be set back on
* as soon as the SET_ADV_ENABLE HCI command completes.
*/
clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* Set require_privacy to false so that the remote device has a
* chance of identifying us.
*/
if (hci_update_random_address(req, false, &own_addr_type) < 0)
return;
memset(&cp, 0, sizeof(cp));
cp.type = LE_ADV_DIRECT_IND;
cp.own_address_type = own_addr_type;
cp.direct_addr_type = conn->dst_type;
bacpy(&cp.direct_addr, &conn->dst);
cp.channel_map = hdev->le_adv_channel_map;
hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
conn->state = BT_CONNECT;
}
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u8 auth_type)
{
@@ -614,9 +679,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
struct hci_request req;
int err;
if (test_bit(HCI_ADVERTISING, &hdev->flags))
return ERR_PTR(-ENOTSUPP);
/* Some devices send ATT messages as soon as the physical link is
* established. To be able to handle these ATT messages, the user-
* space first establishes the connection and then starts the pairing
@@ -664,13 +726,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-ENOMEM);
conn->dst_type = dst_type;
conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
conn->sec_level = BT_SECURITY_LOW;
conn->pending_sec_level = sec_level;
conn->auth_type = auth_type;
hci_req_init(&req, hdev);
if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
hci_req_directed_advertising(&req, conn);
goto create_conn;
}
conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
if (params) {
conn->le_conn_min_interval = params->conn_min_interval;
@@ -680,8 +749,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->le_conn_max_interval = hdev->le_conn_max_interval;
}
hci_req_init(&req, hdev);
/* If controller is scanning, we stop it since some controllers are
* not able to scan and connect at the same time. Also set the
* HCI_LE_SCAN_INTERRUPTED flag so that the command complete
@@ -695,6 +762,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
hci_req_add_le_create_conn(&req, conn);
create_conn:
err = hci_req_run(&req, create_le_conn_complete);
if (err) {
hci_conn_del(conn);

View File

@@ -34,6 +34,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include "smp.h"
@@ -579,6 +580,62 @@ static int sniff_max_interval_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
sniff_max_interval_set, "%llu\n");
static int conn_info_min_age_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
if (val == 0 || val > hdev->conn_info_max_age)
return -EINVAL;
hci_dev_lock(hdev);
hdev->conn_info_min_age = val;
hci_dev_unlock(hdev);
return 0;
}
static int conn_info_min_age_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
hci_dev_lock(hdev);
*val = hdev->conn_info_min_age;
hci_dev_unlock(hdev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
conn_info_min_age_set, "%llu\n");
static int conn_info_max_age_set(void *data, u64 val)
{
struct hci_dev *hdev = data;
if (val == 0 || val < hdev->conn_info_min_age)
return -EINVAL;
hci_dev_lock(hdev);
hdev->conn_info_max_age = val;
hci_dev_unlock(hdev);
return 0;
}
static int conn_info_max_age_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
hci_dev_lock(hdev);
*val = hdev->conn_info_max_age;
hci_dev_unlock(hdev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
conn_info_max_age_set, "%llu\n");
static int identity_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
@@ -955,14 +1012,9 @@ static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
if (count < 3)
return -EINVAL;
buf = kzalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, data, count)) {
err = -EFAULT;
goto done;
}
buf = memdup_user(data, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
if (memcmp(buf, "add", 3) == 0) {
n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
@@ -1759,6 +1811,11 @@ static int __hci_init(struct hci_dev *hdev)
&blacklist_fops);
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
&conn_info_min_age_fops);
debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
&conn_info_max_age_fops);
if (lmp_bredr_capable(hdev)) {
debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
hdev, &inquiry_cache_fops);
@@ -1828,6 +1885,9 @@ static int __hci_init(struct hci_dev *hdev)
&lowpan_debugfs_fops);
debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
&le_auto_conn_fops);
debugfs_create_u16("discov_interleaved_timeout", 0644,
hdev->debugfs,
&hdev->discov_interleaved_timeout);
}
return 0;
@@ -2033,12 +2093,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
hci_remove_remote_oob_data(hdev, &data->bdaddr);
if (ssp)
*ssp = data->ssp_mode;
*ssp = data->ssp_mode;
ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
if (ie) {
if (ie->data.ssp_mode && ssp)
if (ie->data.ssp_mode)
*ssp = true;
if (ie->name_state == NAME_NEEDED &&
@@ -3791,6 +3850,9 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_conn_max_interval = 0x0038;
hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);

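The le_auto_conn_write() hunk above is a common simplification:
memdup_user() collapses the allocate/copy/unwind sequence into one
call that reports failure as an ERR_PTR(). The two forms are
equivalent:

    /* Before: allocate, copy from userspace, unwind by hand. */
    buf = kzalloc(count, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    if (copy_from_user(buf, data, count)) {
        kfree(buf);
        return -EFAULT;
    }

    /* After: one helper; the buffer is fully overwritten by the copy,
     * so the lost zeroing from kzalloc() does not matter.
     */
    buf = memdup_user(data, count);
    if (IS_ERR(buf))
        return PTR_ERR(buf);
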
View File

@@ -991,10 +991,25 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (!sent)
return;
if (status)
return;
hci_dev_lock(hdev);
if (!status)
mgmt_advertising(hdev, *sent);
/* If we're doing connection initiation as peripheral. Set a
* timeout in case something goes wrong.
*/
if (*sent) {
struct hci_conn *conn;
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (conn)
queue_delayed_work(hdev->workqueue,
&conn->le_conn_timeout,
HCI_LE_CONN_TIMEOUT);
}
mgmt_advertising(hdev, *sent);
hci_dev_unlock(hdev);
}
@@ -1018,6 +1033,33 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
static bool has_pending_adv_report(struct hci_dev *hdev)
{
struct discovery_state *d = &hdev->discovery;
return bacmp(&d->last_adv_addr, BDADDR_ANY);
}
static void clear_pending_adv_report(struct hci_dev *hdev)
{
struct discovery_state *d = &hdev->discovery;
bacpy(&d->last_adv_addr, BDADDR_ANY);
d->last_adv_data_len = 0;
}
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
struct discovery_state *d = &hdev->discovery;
bacpy(&d->last_adv_addr, bdaddr);
d->last_adv_addr_type = bdaddr_type;
d->last_adv_rssi = rssi;
memcpy(d->last_adv_data, data, len);
d->last_adv_data_len = len;
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -1036,9 +1078,25 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
switch (cp->enable) {
case LE_SCAN_ENABLE:
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
if (hdev->le_scan_type == LE_SCAN_ACTIVE)
clear_pending_adv_report(hdev);
break;
case LE_SCAN_DISABLE:
/* We do this here instead of when setting DISCOVERY_STOPPED
* since the latter would potentially require waiting for
* inquiry to stop too.
*/
if (has_pending_adv_report(hdev)) {
struct discovery_state *d = &hdev->discovery;
mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
d->last_adv_addr_type, NULL,
d->last_adv_rssi, 0, 1,
d->last_adv_data,
d->last_adv_data_len, NULL, 0);
}
/* Cancel this timer so that we don't try to disable scanning
* when it's already disabled.
*/
@@ -1187,6 +1245,59 @@ static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_rssi *rp = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
if (conn)
conn->rssi = rp->rssi;
hci_dev_unlock(hdev);
}
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_cp_read_tx_power *sent;
struct hci_rp_read_tx_power *rp = (void *) skb->data;
struct hci_conn *conn;
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
if (!sent)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
if (!conn)
goto unlock;
switch (sent->type) {
case 0x00:
conn->tx_power = rp->tx_power;
break;
case 0x01:
conn->max_tx_power = rp->tx_power;
break;
}
unlock:
hci_dev_unlock(hdev);
}
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
BT_DBG("%s status 0x%2.2x", hdev->name, status);
@@ -1342,6 +1453,7 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
* is requested.
*/
if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
conn->pending_sec_level != BT_SECURITY_FIPS &&
conn->pending_sec_level != BT_SECURITY_HIGH &&
conn->pending_sec_level != BT_SECURITY_MEDIUM)
return 0;
@@ -1827,7 +1939,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, 0, !name_known, ssp, NULL,
0);
0, NULL, 0);
}
hci_dev_unlock(hdev);
@@ -2579,6 +2691,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cc_write_remote_amp_assoc(hdev, skb);
break;
case HCI_OP_READ_RSSI:
hci_cc_read_rssi(hdev, skb);
break;
case HCI_OP_READ_TX_POWER:
hci_cc_read_tx_power(hdev, skb);
break;
default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
@@ -2957,7 +3077,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
conn->pending_sec_level == BT_SECURITY_HIGH) {
(conn->pending_sec_level == BT_SECURITY_HIGH ||
conn->pending_sec_level == BT_SECURITY_FIPS)) {
BT_DBG("%s ignoring key unauthenticated for high security",
hdev->name);
goto not_found;
@@ -3102,7 +3223,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
false, &ssp);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
!name_known, ssp, NULL, 0);
!name_known, ssp, NULL, 0, NULL, 0);
}
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -3120,7 +3241,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
false, &ssp);
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi,
!name_known, ssp, NULL, 0);
!name_known, ssp, NULL, 0, NULL, 0);
}
}
@@ -3309,7 +3430,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
eir_len = eir_get_length(info->data, sizeof(info->data));
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi, !name_known,
ssp, info->data, eir_len);
ssp, info->data, eir_len, NULL, 0);
}
hci_dev_unlock(hdev);
@@ -3367,24 +3488,20 @@ unlock:
static u8 hci_get_auth_req(struct hci_conn *conn)
{
/* If remote requests dedicated bonding follow that lead */
if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
/* If both remote and local IO capabilities allow MITM
* protection then require it, otherwise don't */
if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
return HCI_AT_DEDICATED_BONDING;
else
return HCI_AT_DEDICATED_BONDING_MITM;
}
/* If remote requests no-bonding follow that lead */
if (conn->remote_auth == HCI_AT_NO_BONDING ||
conn->remote_auth == HCI_AT_NO_BONDING_MITM)
return conn->remote_auth | (conn->auth_type & 0x01);
return conn->auth_type;
/* If both remote and local have enough IO capabilities, require
* MITM protection
*/
if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
return conn->remote_auth | 0x01;
/* No MITM protection possible so ignore remote requirement */
return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3414,8 +3531,21 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
* to DisplayYesNo as it is not supported by BT spec. */
cp.capability = (conn->io_capability == 0x04) ?
HCI_IO_DISPLAY_YESNO : conn->io_capability;
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
/* If we are initiators, there is no remote information yet */
if (conn->remote_auth == 0xff) {
cp.authentication = conn->auth_type;
/* Request MITM protection if our IO caps allow it
* except for the no-bonding case
*/
if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
cp.authentication != HCI_AT_NO_BONDING)
cp.authentication |= 0x01;
} else {
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
}
if (hci_find_remote_oob_data(hdev, &conn->dst) &&
(conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
@@ -3483,12 +3613,9 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
rem_mitm = (conn->remote_auth & 0x01);
/* If we require MITM but the remote device can't provide that
* (it has NoInputNoOutput) then reject the confirmation
* request. The only exception is when we're dedicated bonding
* initiators (connect_cfm_cb set) since then we always have the MITM
* bit set. */
if (!conn->connect_cfm_cb && loc_mitm &&
conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
* (it has NoInputNoOutput) then reject the confirmation request
*/
if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
BT_DBG("Rejecting request: remote device can't provide MITM");
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3846,17 +3973,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->dst_type = ev->bdaddr_type;
/* The advertising parameters for own address type
* define which source address and source address
* type this connections has.
*/
if (bacmp(&conn->src, BDADDR_ANY)) {
conn->src_type = ADDR_LE_DEV_PUBLIC;
} else {
bacpy(&conn->src, &hdev->static_addr);
conn->src_type = ADDR_LE_DEV_RANDOM;
}
if (ev->role == LE_CONN_ROLE_MASTER) {
conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
@@ -3881,27 +3997,24 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
&conn->init_addr,
&conn->init_addr_type);
}
} else {
/* Set the responder (our side) address type based on
* the advertising address type.
*/
conn->resp_addr_type = hdev->adv_addr_type;
if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
bacpy(&conn->resp_addr, &hdev->random_addr);
else
bacpy(&conn->resp_addr, &hdev->bdaddr);
conn->init_addr_type = ev->bdaddr_type;
bacpy(&conn->init_addr, &ev->bdaddr);
}
} else {
cancel_delayed_work(&conn->le_conn_timeout);
}
/* Ensure that the hci_conn contains the identity address type
* regardless of which address the connection was made with.
*/
hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
if (!conn->out) {
/* Set the responder (our side) address type based on
* the advertising address type.
*/
conn->resp_addr_type = hdev->adv_addr_type;
if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
bacpy(&conn->resp_addr, &hdev->random_addr);
else
bacpy(&conn->resp_addr, &hdev->bdaddr);
conn->init_addr_type = ev->bdaddr_type;
bacpy(&conn->init_addr, &ev->bdaddr);
}
/* Lookup the identity address from the stored connection
* address and address type.
@@ -3981,25 +4094,97 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
}
}
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
struct discovery_state *d = &hdev->discovery;
bool match;
/* Passive scanning shouldn't trigger any device found events */
if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
check_pending_le_conn(hdev, bdaddr, bdaddr_type);
return;
}
/* If there's nothing pending either store the data from this
* event or send an immediate device found event if the data
* should not be stored for later.
*/
if (!has_pending_adv_report(hdev)) {
/* If the report will trigger a SCAN_REQ store it for
* later merging.
*/
if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
store_pending_adv_report(hdev, bdaddr, bdaddr_type,
rssi, data, len);
return;
}
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
rssi, 0, 1, data, len, NULL, 0);
return;
}
/* Check if the pending report is for the same device as the new one */
match = (!bacmp(bdaddr, &d->last_adv_addr) &&
bdaddr_type == d->last_adv_addr_type);
/* If the pending data doesn't match this report or this isn't a
* scan response (e.g. we got a duplicate ADV_IND) then force
* sending of the pending data.
*/
if (type != LE_ADV_SCAN_RSP || !match) {
/* Send out whatever is in the cache, but skip duplicates */
if (!match)
mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
d->last_adv_addr_type, NULL,
d->last_adv_rssi, 0, 1,
d->last_adv_data,
d->last_adv_data_len, NULL, 0);
/* If the new report will trigger a SCAN_REQ store it for
* later merging.
*/
if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
store_pending_adv_report(hdev, bdaddr, bdaddr_type,
rssi, data, len);
return;
}
/* The advertising reports cannot be merged, so clear
* the pending report and send out a device found event.
*/
clear_pending_adv_report(hdev);
mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
rssi, 0, 1, data, len, NULL, 0);
return;
}
/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
* the new event is a SCAN_RSP. We can therefore proceed with
* sending a merged device found event.
*/
mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
d->last_adv_data, d->last_adv_data_len);
clear_pending_adv_report(hdev);
}
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 num_reports = skb->data[0];
void *ptr = &skb->data[1];
s8 rssi;
hci_dev_lock(hdev);
while (num_reports--) {
struct hci_ev_le_advertising_info *ev = ptr;
if (ev->evt_type == LE_ADV_IND ||
ev->evt_type == LE_ADV_DIRECT_IND)
check_pending_le_conn(hdev, &ev->bdaddr,
ev->bdaddr_type);
s8 rssi;
rssi = ev->data[ev->length];
mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
NULL, rssi, 0, 1, ev->data, ev->length);
process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
ev->bdaddr_type, rssi, ev->data, ev->length);
ptr += sizeof(*ev) + ev->length + 1;
}

View File

@@ -143,7 +143,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
if (!skb_copy) {
/* Create a private copy with headroom */
skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
if (!skb_copy)
continue;
@@ -247,8 +247,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_mon_hdr *hdr;
/* Create a private copy with headroom */
skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
GFP_ATOMIC);
skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
GFP_ATOMIC, true);
if (!skb_copy)
continue;
@@ -524,16 +524,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return -EPERM;
if (arg)
set_bit(HCI_RAW, &hdev->flags);
else
clear_bit(HCI_RAW, &hdev->flags);
return 0;
return -EOPNOTSUPP;
case HCIGETCONNINFO:
return hci_get_conn_info(hdev, (void __user *) arg);

View File

@@ -471,8 +471,14 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
chan->remote_max_tx = chan->max_tx;
chan->remote_tx_win = chan->tx_win;
chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
chan->conf_state = 0;
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}

View File

@@ -1180,13 +1180,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
release_sock(parent);
return NULL;
}
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
GFP_ATOMIC);
if (!sk)
if (!sk) {
release_sock(parent);
return NULL;
}
bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);

View File

@@ -58,6 +58,7 @@ int bt_to_errno(__u16 code)
return EIO;
case 0x04:
case 0x3c:
return EHOSTDOWN;
case 0x05:

View File

@@ -29,12 +29,13 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include "smp.h"
#define MGMT_VERSION 1
#define MGMT_REVISION 5
#define MGMT_REVISION 6
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -83,6 +84,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_SET_DEBUG_KEYS,
MGMT_OP_SET_PRIVACY,
MGMT_OP_LOAD_IRKS,
MGMT_OP_GET_CONN_INFO,
};
static const u16 mgmt_events[] = {
@@ -2850,10 +2852,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
sec_level = BT_SECURITY_MEDIUM;
if (cp->io_cap == 0x03)
auth_type = HCI_AT_DEDICATED_BONDING;
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
auth_type = HCI_AT_DEDICATED_BONDING;
if (cp->addr.type == BDADDR_BREDR) {
conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
@@ -3351,6 +3350,8 @@ static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
unsigned long timeout = 0;
BT_DBG("status %d", status);
if (status) {
@@ -3366,13 +3367,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
switch (hdev->discovery.type) {
case DISCOV_TYPE_LE:
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
DISCOV_LE_TIMEOUT);
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
break;
case DISCOV_TYPE_INTERLEAVED:
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
DISCOV_INTERLEAVED_TIMEOUT);
timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
break;
case DISCOV_TYPE_BREDR:
@@ -3381,6 +3380,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
default:
BT_ERR("Invalid discovery type %d", hdev->discovery.type);
}
if (!timeout)
return;
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -4530,7 +4534,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, addr_type;
u8 type, addr_type, authenticated;
if (key->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
@@ -4542,8 +4546,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
type = HCI_SMP_LTK_SLAVE;
switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
authenticated = 0x00;
break;
case MGMT_LTK_AUTHENTICATED:
authenticated = 0x01;
break;
default:
continue;
}
hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
key->type, key->val, key->enc_size, key->ediv,
authenticated, key->val, key->enc_size, key->ediv,
key->rand);
}
@@ -4555,6 +4570,218 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return err;
}
struct cmd_conn_lookup {
struct hci_conn *conn;
bool valid_tx_power;
u8 mgmt_status;
};
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
struct cmd_conn_lookup *match = data;
struct mgmt_cp_get_conn_info *cp;
struct mgmt_rp_get_conn_info rp;
struct hci_conn *conn = cmd->user_data;
if (conn != match->conn)
return;
cp = (struct mgmt_cp_get_conn_info *) cmd->param;
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp.addr.type = cp->addr.type;
if (!match->mgmt_status) {
rp.rssi = conn->rssi;
if (match->valid_tx_power) {
rp.tx_power = conn->tx_power;
rp.max_tx_power = conn->max_tx_power;
} else {
rp.tx_power = HCI_TX_POWER_INVALID;
rp.max_tx_power = HCI_TX_POWER_INVALID;
}
}
cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
match->mgmt_status, &rp, sizeof(rp));
hci_conn_drop(conn);
mgmt_pending_remove(cmd);
}
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
struct hci_cp_read_rssi *cp;
struct hci_conn *conn;
struct cmd_conn_lookup match;
u16 handle;
BT_DBG("status 0x%02x", status);
hci_dev_lock(hdev);
/* TX power data is valid in case request completed successfully,
* otherwise we assume it's not valid. At the moment we assume that
* either both or none of current and max values are valid to keep code
* simple.
*/
match.valid_tx_power = !status;
/* Commands sent in request are either Read RSSI or Read Transmit Power
* Level so we check which one was last sent to retrieve connection
* handle. Both commands have handle as first parameter so it's safe to
* cast data on the same command struct.
*
* First command sent is always Read RSSI and we fail only if it fails.
* In other case we simply override error to indicate success as we
* already remembered if TX power value is actually valid.
*/
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
if (!cp) {
cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
status = 0;
}
if (!cp) {
BT_ERR("invalid sent_cmd in response");
goto unlock;
}
handle = __le16_to_cpu(cp->handle);
conn = hci_conn_hash_lookup_handle(hdev, handle);
if (!conn) {
BT_ERR("unknown handle (%d) in response", handle);
goto unlock;
}
match.conn = conn;
match.mgmt_status = mgmt_status(status);
/* Cache refresh is complete, now reply for mgmt request for given
* connection only.
*/
mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
get_conn_info_complete, &match);
unlock:
hci_dev_unlock(hdev);
}
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_get_conn_info *cp = data;
struct mgmt_rp_get_conn_info rp;
struct hci_conn *conn;
unsigned long conn_info_age;
int err = 0;
BT_DBG("%s", hdev->name);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
rp.addr.type = cp->addr.type;
if (!bdaddr_type_is_valid(cp->addr.type))
return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
MGMT_STATUS_INVALID_PARAMS,
&rp, sizeof(rp));
hci_dev_lock(hdev);
if (!hdev_is_powered(hdev)) {
err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
goto unlock;
}
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
if (!conn || conn->state != BT_CONNECTED) {
err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
goto unlock;
}
/* To avoid client trying to guess when to poll again for information we
* calculate conn info age as random value between min/max set in hdev.
*/
conn_info_age = hdev->conn_info_min_age +
prandom_u32_max(hdev->conn_info_max_age -
hdev->conn_info_min_age);
/* Query controller to refresh cached values if they are too old or were
* never read.
*/
if (time_after(jiffies, conn->conn_info_timestamp +
msecs_to_jiffies(conn_info_age)) ||
!conn->conn_info_timestamp) {
struct hci_request req;
struct hci_cp_read_tx_power req_txp_cp;
struct hci_cp_read_rssi req_rssi_cp;
struct pending_cmd *cmd;
hci_req_init(&req, hdev);
req_rssi_cp.handle = cpu_to_le16(conn->handle);
hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
&req_rssi_cp);
/* For LE links TX power does not change thus we don't need to
* query for it once value is known.
*/
if (!bdaddr_type_is_le(cp->addr.type) ||
conn->tx_power == HCI_TX_POWER_INVALID) {
req_txp_cp.handle = cpu_to_le16(conn->handle);
req_txp_cp.type = 0x00;
hci_req_add(&req, HCI_OP_READ_TX_POWER,
sizeof(req_txp_cp), &req_txp_cp);
}
/* Max TX power needs to be read only once per connection */
if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
req_txp_cp.handle = cpu_to_le16(conn->handle);
req_txp_cp.type = 0x01;
hci_req_add(&req, HCI_OP_READ_TX_POWER,
sizeof(req_txp_cp), &req_txp_cp);
}
err = hci_req_run(&req, conn_info_refresh_complete);
if (err < 0)
goto unlock;
cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
hci_conn_hold(conn);
cmd->user_data = conn;
conn->conn_info_timestamp = jiffies;
} else {
/* Cache is valid, just reply with values cached in hci_conn */
rp.rssi = conn->rssi;
rp.tx_power = conn->tx_power;
rp.max_tx_power = conn->max_tx_power;
err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
unlock:
hci_dev_unlock(hdev);
return err;
}
static const struct mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
@@ -4610,6 +4837,7 @@ static const struct mgmt_handler {
{ set_debug_keys, false, MGMT_SETTING_SIZE },
{ set_privacy, false, MGMT_SET_PRIVACY_SIZE },
{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
{ get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
};
@@ -5005,6 +5233,14 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
if (ltk->authenticated)
return MGMT_LTK_AUTHENTICATED;
return MGMT_LTK_UNAUTHENTICATED;
}
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
struct mgmt_ev_new_long_term_key ev;
@@ -5030,7 +5266,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.type = key->authenticated;
ev.key.type = mgmt_ltk_type(key);
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
ev.key.rand = key->rand;
@@ -5668,8 +5904,9 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
}
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
ssp, u8 *eir, u16 eir_len)
u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
u8 scan_rsp_len)
{
char buf[512];
struct mgmt_ev_device_found *ev = (void *) buf;
@@ -5679,8 +5916,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
if (!hci_discovery_active(hdev))
return;
/* Leave 5 bytes for a potential CoD field */
if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
/* Make sure that the buffer is big enough. The 5 extra bytes
* are for the potential CoD field.
*/
if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
return;
memset(buf, 0, sizeof(buf));
@@ -5707,8 +5946,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
ev->eir_len = cpu_to_le16(eir_len);
ev_size = sizeof(*ev) + eir_len;
if (scan_rsp_len > 0)
memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

View File

@@ -307,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
skb_queue_head_init(&d->tx_queue);
spin_lock_init(&d->lock);
mutex_init(&d->lock);
atomic_set(&d->refcnt, 1);
rfcomm_dlc_clear_state(d);

View File

@@ -70,7 +70,7 @@ struct rfcomm_dev {
};
static LIST_HEAD(rfcomm_dev_list);
static DEFINE_SPINLOCK(rfcomm_dev_lock);
static DEFINE_MUTEX(rfcomm_dev_lock);
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
@@ -96,9 +96,9 @@ static void rfcomm_dev_destruct(struct tty_port *port)
if (dev->tty_dev)
tty_unregister_device(rfcomm_tty_driver, dev->id);
spin_lock(&rfcomm_dev_lock);
mutex_lock(&rfcomm_dev_lock);
list_del(&dev->list);
spin_unlock(&rfcomm_dev_lock);
mutex_unlock(&rfcomm_dev_lock);
kfree(dev);
@@ -161,14 +161,14 @@ static struct rfcomm_dev *rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
spin_lock(&rfcomm_dev_lock);
mutex_lock(&rfcomm_dev_lock);
dev = __rfcomm_dev_lookup(id);
if (dev && !tty_port_get(&dev->port))
dev = NULL;
spin_unlock(&rfcomm_dev_lock);
mutex_unlock(&rfcomm_dev_lock);
return dev;
}
@@ -224,7 +224,7 @@ static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
if (!dev)
return ERR_PTR(-ENOMEM);
spin_lock(&rfcomm_dev_lock);
mutex_lock(&rfcomm_dev_lock);
if (req->dev_id < 0) {
dev->id = 0;
@@ -305,11 +305,11 @@ static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
holds reference to this module. */
__module_get(THIS_MODULE);
spin_unlock(&rfcomm_dev_lock);
mutex_unlock(&rfcomm_dev_lock);
return dev;
out:
spin_unlock(&rfcomm_dev_lock);
mutex_unlock(&rfcomm_dev_lock);
kfree(dev);
return ERR_PTR(err);
}
@@ -524,7 +524,7 @@ static int rfcomm_get_dev_list(void __user *arg)
di = dl->dev_info;
spin_lock(&rfcomm_dev_lock);
mutex_lock(&rfcomm_dev_lock);
list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (!tty_port_get(&dev->port))
@@ -540,7 +540,7 @@ static int rfcomm_get_dev_list(void __user *arg)
break;
}
spin_unlock(&rfcomm_dev_lock);
mutex_unlock(&rfcomm_dev_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*di);

View File
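The spinlock-to-mutex conversion of rfcomm_dev_lock runs through the
whole file. Everything that takes the lock runs in sleepable process
context (ioctls, tty port destruction), so a mutex suffices and, unlike
a spinlock, lets the critical sections sleep; that motivation is
inferred from the context here, not stated in the diff. The conversion
itself is mechanical:

    mutex_lock(&rfcomm_dev_lock);           /* was spin_lock() */
    dev = __rfcomm_dev_lookup(id);
    if (dev && !tty_port_get(&dev->port))
        dev = NULL;
    mutex_unlock(&rfcomm_dev_lock);         /* was spin_unlock() */
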

@@ -35,6 +35,33 @@
#define AUTH_REQ_MASK 0x07
#define SMP_FLAG_TK_VALID 1
#define SMP_FLAG_CFM_PENDING 2
#define SMP_FLAG_MITM_AUTH 3
#define SMP_FLAG_COMPLETE 4
#define SMP_FLAG_INITIATOR 5
struct smp_chan {
struct l2cap_conn *conn;
u8 preq[7]; /* SMP Pairing Request */
u8 prsp[7]; /* SMP Pairing Response */
u8 prnd[16]; /* SMP Pairing Random (local) */
u8 rrnd[16]; /* SMP Pairing Random (remote) */
u8 pcnf[16]; /* SMP Pairing Confirm */
u8 tk[16]; /* SMP Temporary Key */
u8 enc_key_size;
u8 remote_key_dist;
bdaddr_t id_addr;
u8 id_addr_type;
u8 irk[16];
struct smp_csrk *csrk;
struct smp_csrk *slave_csrk;
struct smp_ltk *ltk;
struct smp_ltk *slave_ltk;
struct smp_irk *remote_irk;
unsigned long flags;
};
static inline void swap128(const u8 src[16], u8 dst[16])
{
int i;
@@ -369,7 +396,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
/* Initialize key for JUST WORKS */
memset(smp->tk, 0, sizeof(smp->tk));
clear_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
clear_bit(SMP_FLAG_TK_VALID, &smp->flags);
BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
@@ -388,19 +415,18 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
method = JUST_WORKS;
/* Don't confirm locally initiated pairing attempts */
if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR,
&smp->smp_flags))
if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
method = JUST_WORKS;
/* If Just Works, Continue with Zero TK */
if (method == JUST_WORKS) {
set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
set_bit(SMP_FLAG_TK_VALID, &smp->flags);
return 0;
}
/* Not Just Works/Confirm results in MITM Authentication */
if (method != JUST_CFM)
set_bit(SMP_FLAG_MITM_AUTH, &smp->smp_flags);
set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
/* If both devices have Keyboard-Display I/O, the master
* Confirms and the slave Enters the passkey.
@@ -419,7 +445,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
passkey %= 1000000;
put_unaligned_le32(passkey, smp->tk);
BT_DBG("PassKey: %d", passkey);
set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
set_bit(SMP_FLAG_TK_VALID, &smp->flags);
}
hci_dev_lock(hcon->hdev);
@@ -441,15 +467,13 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
return ret;
}
static void confirm_work(struct work_struct *work)
static u8 smp_confirm(struct smp_chan *smp)
{
struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
struct l2cap_conn *conn = smp->conn;
struct hci_dev *hdev = conn->hcon->hdev;
struct crypto_blkcipher *tfm = hdev->tfm_aes;
struct smp_cmd_pairing_confirm cp;
int ret;
u8 reason;
BT_DBG("conn %p", conn);
@@ -463,35 +487,27 @@ static void confirm_work(struct work_struct *work)
hci_dev_unlock(hdev);
if (ret) {
reason = SMP_UNSPECIFIED;
goto error;
}
if (ret)
return SMP_UNSPECIFIED;
clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
return;
error:
smp_failure(conn, reason);
return 0;
}
static void random_work(struct work_struct *work)
static u8 smp_random(struct smp_chan *smp)
{
struct smp_chan *smp = container_of(work, struct smp_chan, random);
struct l2cap_conn *conn = smp->conn;
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
struct crypto_blkcipher *tfm = hdev->tfm_aes;
u8 reason, confirm[16];
u8 confirm[16];
int ret;
if (IS_ERR_OR_NULL(tfm)) {
reason = SMP_UNSPECIFIED;
goto error;
}
if (IS_ERR_OR_NULL(tfm))
return SMP_UNSPECIFIED;
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
@@ -504,15 +520,12 @@ static void random_work(struct work_struct *work)
hci_dev_unlock(hdev);
if (ret) {
reason = SMP_UNSPECIFIED;
goto error;
}
if (ret)
return SMP_UNSPECIFIED;
if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
BT_ERR("Pairing failed (confirmation values mismatch)");
reason = SMP_CONFIRM_FAILED;
goto error;
return SMP_CONFIRM_FAILED;
}
if (hcon->out) {
@@ -525,10 +538,8 @@ static void random_work(struct work_struct *work)
memset(stk + smp->enc_key_size, 0,
SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) {
reason = SMP_UNSPECIFIED;
goto error;
}
if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
return SMP_UNSPECIFIED;
hci_le_start_enc(hcon, ediv, rand, stk);
hcon->enc_key_size = smp->enc_key_size;
@@ -550,10 +561,7 @@ static void random_work(struct work_struct *work)
ediv, rand);
}
return;
error:
smp_failure(conn, reason);
return 0;
}
static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -564,9 +572,6 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
if (!smp)
return NULL;
INIT_WORK(&smp->confirm, confirm_work);
INIT_WORK(&smp->random, random_work);
smp->conn = conn;
conn->smp_chan = smp;
conn->hcon->smp_conn = conn;
@@ -583,7 +588,7 @@ void smp_chan_destroy(struct l2cap_conn *conn)
BUG_ON(!smp);
complete = test_bit(SMP_FLAG_COMPLETE, &smp->smp_flags);
complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
mgmt_smp_complete(conn->hcon, complete);
kfree(smp->csrk);
@@ -634,7 +639,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
put_unaligned_le32(value, smp->tk);
/* Fall Through */
case MGMT_OP_USER_CONFIRM_REPLY:
set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
set_bit(SMP_FLAG_TK_VALID, &smp->flags);
break;
case MGMT_OP_USER_PASSKEY_NEG_REPLY:
case MGMT_OP_USER_CONFIRM_NEG_REPLY:
@@ -646,8 +651,11 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
}
/* If it is our turn to send Pairing Confirm, do so now */
if (test_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags))
queue_work(hcon->hdev->workqueue, &smp->confirm);
if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) {
u8 rsp = smp_confirm(smp);
if (rsp)
smp_failure(conn, rsp);
}
return 0;
}
@@ -656,14 +664,13 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
struct smp_chan *smp;
u8 key_size;
u8 auth = SMP_AUTH_NONE;
u8 key_size, auth;
int ret;
BT_DBG("conn %p", conn);
if (skb->len < sizeof(*req))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
if (conn->hcon->link_mode & HCI_LM_MASTER)
return SMP_CMD_NOTSUPP;
@@ -681,8 +688,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
skb_pull(skb, sizeof(*req));
/* We didn't start the pairing, so match remote */
if (req->auth_req & SMP_AUTH_BONDING)
auth = req->auth_req;
auth = req->auth_req;
conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
@@ -704,7 +710,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (ret)
return SMP_UNSPECIFIED;
clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags);
clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
return 0;
}
@@ -713,14 +719,13 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
struct smp_chan *smp = conn->smp_chan;
struct hci_dev *hdev = conn->hcon->hdev;
u8 key_size, auth = SMP_AUTH_NONE;
int ret;
BT_DBG("conn %p", conn);
if (skb->len < sizeof(*rsp))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
if (!(conn->hcon->link_mode & HCI_LM_MASTER))
return SMP_CMD_NOTSUPP;
@@ -753,11 +758,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (ret)
return SMP_UNSPECIFIED;
set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
/* Can't compose response until we have been confirmed */
if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
queue_work(hdev->workqueue, &smp->confirm);
if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
return smp_confirm(smp);
return 0;
}
@@ -765,12 +770,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_chan *smp = conn->smp_chan;
struct hci_dev *hdev = conn->hcon->hdev;
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
if (skb->len < sizeof(smp->pcnf))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
skb_pull(skb, sizeof(smp->pcnf));
@@ -778,10 +782,10 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
if (conn->hcon->out)
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
queue_work(hdev->workqueue, &smp->confirm);
else if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
return smp_confirm(smp);
else
set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
return 0;
}
@@ -789,19 +793,16 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_chan *smp = conn->smp_chan;
struct hci_dev *hdev = conn->hcon->hdev;
BT_DBG("conn %p", conn);
if (skb->len < sizeof(smp->rrnd))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd));
skb_pull(skb, sizeof(smp->rrnd));
queue_work(hdev->workqueue, &smp->random);
return 0;
return smp_random(smp);
}
static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
@@ -836,7 +837,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
if (skb->len < sizeof(*rp))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
if (!(conn->hcon->link_mode & HCI_LM_MASTER))
return SMP_CMD_NOTSUPP;
@@ -861,7 +862,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags);
clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
return 0;
}
@@ -908,10 +909,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
authreq = seclevel_to_authreq(sec_level);
/* hcon->auth_type is set by pair_device in mgmt.c. If the MITM
* flag is set we should also set it for the SMP request.
/* Require MITM if IO Capability allows or the security level
* requires it.
*/
if ((hcon->auth_type & 0x01))
if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
sec_level > BT_SECURITY_MEDIUM)
authreq |= SMP_AUTH_MITM;
if (hcon->link_mode & HCI_LM_MASTER) {
@@ -928,7 +930,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
}
set_bit(SMP_FLAG_INITIATOR, &smp->smp_flags);
set_bit(SMP_FLAG_INITIATOR, &smp->flags);
done:
hcon->pending_sec_level = sec_level;
@@ -944,7 +946,7 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
if (skb->len < sizeof(*rp))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
/* Ignore this PDU if it wasn't requested */
if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
@@ -969,7 +971,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
if (skb->len < sizeof(*rp))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
/* Ignore this PDU if it wasn't requested */
if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
@@ -1001,7 +1003,7 @@ static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("");
if (skb->len < sizeof(*info))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
/* Ignore this PDU if it wasn't requested */
if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
@@ -1025,7 +1027,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
BT_DBG("");
if (skb->len < sizeof(*info))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
/* Ignore this PDU if it wasn't requested */
if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
@@ -1075,7 +1077,7 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
if (skb->len < sizeof(*rp))
return SMP_UNSPECIFIED;
return SMP_INVALID_PARAMS;
/* Ignore this PDU if it wasn't requested */
if (!(smp->remote_key_dist & SMP_DIST_SIGN))
@@ -1358,7 +1360,7 @@ int smp_distribute_keys(struct l2cap_conn *conn)
clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags);
cancel_delayed_work_sync(&conn->security_timer);
set_bit(SMP_FLAG_COMPLETE, &smp->smp_flags);
set_bit(SMP_FLAG_COMPLETE, &smp->flags);
smp_notify_keys(conn);
smp_chan_destroy(conn);
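
The smp_conn_security() hunk above replaces the old hcon->auth_type check with an IO-capability test. A standalone sketch of the new rule; the constant values are the standard HCI IO capability (0x03 = NoInputNoOutput) and L2CAP security level (2 = medium) definitions:

#include <stdbool.h>
#include <stdio.h>

#define HCI_IO_NO_INPUT_OUTPUT 0x03
#define BT_SECURITY_MEDIUM     2

static bool want_mitm(int io_capability, int sec_level)
{
    return io_capability != HCI_IO_NO_INPUT_OUTPUT ||
           sec_level > BT_SECURITY_MEDIUM;
}

int main(void)
{
    /* NoInputNoOutput device, medium security: Just Works, no MITM */
    printf("%d\n", want_mitm(HCI_IO_NO_INPUT_OUTPUT, BT_SECURITY_MEDIUM));
    /* DisplayYesNo (0x01) device: request MITM protection */
    printf("%d\n", want_mitm(0x01, BT_SECURITY_MEDIUM));
    return 0;
}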

net/bluetooth/smp.h

@@ -111,39 +111,11 @@ struct smp_cmd_security_req {
#define SMP_CMD_NOTSUPP 0x07
#define SMP_UNSPECIFIED 0x08
#define SMP_REPEATED_ATTEMPTS 0x09
#define SMP_INVALID_PARAMS 0x0a
#define SMP_MIN_ENC_KEY_SIZE 7
#define SMP_MAX_ENC_KEY_SIZE 16
#define SMP_FLAG_TK_VALID 1
#define SMP_FLAG_CFM_PENDING 2
#define SMP_FLAG_MITM_AUTH 3
#define SMP_FLAG_COMPLETE 4
#define SMP_FLAG_INITIATOR 5
struct smp_chan {
struct l2cap_conn *conn;
u8 preq[7]; /* SMP Pairing Request */
u8 prsp[7]; /* SMP Pairing Response */
u8 prnd[16]; /* SMP Pairing Random (local) */
u8 rrnd[16]; /* SMP Pairing Random (remote) */
u8 pcnf[16]; /* SMP Pairing Confirm */
u8 tk[16]; /* SMP Temporary Key */
u8 enc_key_size;
u8 remote_key_dist;
bdaddr_t id_addr;
u8 id_addr_type;
u8 irk[16];
struct smp_csrk *csrk;
struct smp_csrk *slave_csrk;
struct smp_ltk *ltk;
struct smp_ltk *slave_ltk;
struct smp_irk *remote_irk;
unsigned long smp_flags;
struct work_struct confirm;
struct work_struct random;
};
/* SMP Commands */
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
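
Several hunks above tighten PDU validation to return the new 0x0a Invalid Parameters reason for short commands instead of Unspecified Reason. A minimal userspace sketch of that check for the Pairing Random PDU, with the struct reduced to its 16-byte random value (check_pairing_random is an illustrative stand-in, not a kernel function):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SMP_INVALID_PARAMS 0x0a

struct smp_cmd_pairing_random { uint8_t rand[16]; };

static uint8_t check_pairing_random(const uint8_t *data, size_t len)
{
    if (len < sizeof(struct smp_cmd_pairing_random))
        return SMP_INVALID_PARAMS;   /* previously SMP_UNSPECIFIED (0x08) */
    (void)data;
    return 0;
}

int main(void)
{
    uint8_t pdu[16] = { 0 };

    printf("%#x\n", check_pairing_random(pdu, 8));    /* 0xa */
    printf("%#x\n", check_pairing_random(pdu, 16));   /* 0 */
    return 0;
}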

net/bridge/Makefile

@@ -5,7 +5,7 @@
obj-$(CONFIG_BRIDGE) += bridge.o
bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
br_ioctl.o br_stp.o br_stp_bpdu.o \
br_stp_if.o br_stp_timer.o br_netlink.o
bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
@@ -16,4 +16,4 @@ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
obj-$(CONFIG_NETFILTER) += netfilter/

net/bridge/br.c

@@ -22,6 +22,104 @@
#include "br_private.h"
/*
* Handle changes in state of network devices enslaved to a bridge.
*
* Note: don't care about up/down if bridge itself is down, because
* port state is checked when bridge is brought up.
*/
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
bool changed_addr;
int err;
/* register of bridge completed, add sysfs entries */
if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
br_sysfs_addbr(dev);
return NOTIFY_DONE;
}
/* not a port of a bridge */
p = br_port_get_rtnl(dev);
if (!p)
return NOTIFY_DONE;
br = p->br;
switch (event) {
case NETDEV_CHANGEMTU:
dev_set_mtu(br->dev, br_min_mtu(br));
break;
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
break;
case NETDEV_CHANGE:
br_port_carrier_check(p);
break;
case NETDEV_FEAT_CHANGE:
netdev_update_features(br->dev);
break;
case NETDEV_DOWN:
spin_lock_bh(&br->lock);
if (br->dev->flags & IFF_UP)
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
break;
case NETDEV_UP:
if (netif_running(br->dev) && netif_oper_up(dev)) {
spin_lock_bh(&br->lock);
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
}
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
case NETDEV_CHANGENAME:
err = br_sysfs_renameif(p);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, br->dev);
break;
}
/* Events that may cause spanning tree to refresh */
if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
event == NETDEV_CHANGE || event == NETDEV_DOWN)
br_ifinfo_notify(RTM_NEWLINK, p);
return NOTIFY_DONE;
}
static struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
static void __net_exit br_net_exit(struct net *net)
{
struct net_device *dev;

net/bridge/br_device.c

@@ -112,6 +112,12 @@ static void br_dev_set_multicast_list(struct net_device *dev)
{
}
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
if (change & IFF_PROMISC)
br_manage_promisc(netdev_priv(dev));
}
static int br_dev_stop(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
@@ -309,6 +315,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_get_stats64 = br_get_stats64,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_rx_mode = br_dev_set_multicast_list,
.ndo_change_rx_flags = br_dev_change_rx_flags,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -348,14 +355,15 @@ void br_dev_setup(struct net_device *dev)
dev->netdev_ops = &br_netdev_ops;
dev->destructor = br_dev_free;
SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
dev->tx_queue_len = 0;
dev->priv_flags = IFF_EBRIDGE;
dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
br->dev = dev;
@@ -370,6 +378,7 @@ void br_dev_setup(struct net_device *dev)
br->stp_enabled = BR_NO_STP;
br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
br->designated_root = br->bridge_id;
br->bridge_max_age = br->max_age = 20 * HZ;
@@ -380,4 +389,5 @@ void br_dev_setup(struct net_device *dev)
br_netfilter_rtable_init(br);
br_stp_timer_init(br);
br_multicast_init(br);
br_vlan_init(br);
}
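
The new ndo_change_rx_flags hook fires when IFF_PROMISC is toggled on the bridge device itself, which is what lets br_manage_promisc() force all ports promiscuous. A userspace program can trigger it with the standard SIOCSIFFLAGS ioctl ("br0" is a placeholder bridge name):

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "br0", IFNAMSIZ - 1);
    if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
        perror("SIOCGIFFLAGS");
        close(fd);
        return 1;
    }
    ifr.ifr_flags |= IFF_PROMISC;   /* this flip reaches ndo_change_rx_flags */
    if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0) {
        perror("SIOCSIFFLAGS");
        close(fd);
        return 1;
    }
    close(fd);
    return 0;
}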

net/bridge/br_fdb.c

@@ -85,8 +85,58 @@ static void fdb_rcu_free(struct rcu_head *head)
kmem_cache_free(br_fdb_cache, ent);
}
/* When a static FDB entry is added, the mac address from the entry is
* added to the bridge private HW address list and all required ports
* are then updated with the new information.
* Called under RTNL.
*/
static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
{
int err;
struct net_bridge_port *p, *tmp;
ASSERT_RTNL();
list_for_each_entry(p, &br->port_list, list) {
if (!br_promisc_port(p)) {
err = dev_uc_add(p->dev, addr);
if (err)
goto undo;
}
}
return;
undo:
list_for_each_entry(tmp, &br->port_list, list) {
if (tmp == p)
break;
if (!br_promisc_port(tmp))
dev_uc_del(tmp->dev, addr);
}
}
/* When a static FDB entry is deleted, the HW address from that entry is
* also removed from the bridge private HW address list and updates all
* the ports with needed information.
* Called under RTNL.
*/
static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
{
struct net_bridge_port *p;
ASSERT_RTNL();
list_for_each_entry(p, &br->port_list, list) {
if (!br_promisc_port(p))
dev_uc_del(p->dev, addr);
}
}
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
{
if (f->is_static)
fdb_del_hw(br, f->addr.addr);
hlist_del_rcu(&f->hlist);
fdb_notify(br, f, RTM_DELNEIGH);
call_rcu(&f->rcu, fdb_rcu_free);
@@ -466,6 +516,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
return -ENOMEM;
fdb->is_local = fdb->is_static = 1;
fdb_add_hw(br, addr);
fdb_notify(br, fdb, RTM_NEWNEIGH);
return 0;
}
@@ -571,6 +622,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
goto nla_put_failure;
if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
@@ -592,6 +645,7 @@ static inline size_t fdb_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
@@ -684,13 +738,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
}
if (fdb_to_nud(fdb) != state) {
if (state & NUD_PERMANENT)
fdb->is_local = fdb->is_static = 1;
else if (state & NUD_NOARP) {
if (state & NUD_PERMANENT) {
fdb->is_local = 1;
if (!fdb->is_static) {
fdb->is_static = 1;
fdb_add_hw(br, addr);
}
} else if (state & NUD_NOARP) {
fdb->is_local = 0;
fdb->is_static = 1;
} else
fdb->is_local = fdb->is_static = 0;
if (!fdb->is_static) {
fdb->is_static = 1;
fdb_add_hw(br, addr);
}
} else {
fdb->is_local = 0;
if (fdb->is_static) {
fdb->is_static = 0;
fdb_del_hw(br, addr);
}
}
modified = true;
}
@@ -880,3 +946,59 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
out:
return err;
}
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
struct net_bridge_fdb_entry *fdb, *tmp;
int i;
int err;
ASSERT_RTNL();
for (i = 0; i < BR_HASH_SIZE; i++) {
hlist_for_each_entry(fdb, &br->hash[i], hlist) {
/* We only care for static entries */
if (!fdb->is_static)
continue;
err = dev_uc_add(p->dev, fdb->addr.addr);
if (err)
goto rollback;
}
}
return 0;
rollback:
for (i = 0; i < BR_HASH_SIZE; i++) {
hlist_for_each_entry(tmp, &br->hash[i], hlist) {
/* If we reached the fdb that failed, we can stop */
if (tmp == fdb)
break;
/* We only care for static entries */
if (!tmp->is_static)
continue;
dev_uc_del(p->dev, tmp->addr.addr);
}
}
return err;
}
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
{
struct net_bridge_fdb_entry *fdb;
int i;
ASSERT_RTNL();
for (i = 0; i < BR_HASH_SIZE; i++) {
hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
/* We only care for static entries */
if (!fdb->is_static)
continue;
dev_uc_del(p->dev, fdb->addr.addr);
}
}
}
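
fdb_add_hw() and br_fdb_sync_static() above share one apply-then-rollback shape: walk the set of ports or entries, and on the first dev_uc_add() failure undo the additions made so far, stopping at the element that failed. A standalone sketch of the pattern; apply() and undo() are illustrative stand-ins for dev_uc_add()/dev_uc_del(), and the rollback here walks backward where the kernel re-walks forward until it hits the failed element, which is the same idea:

#include <stdio.h>

static int apply(int i) { return i == 3 ? -1 : 0; }   /* fails on item 3 */
static void undo(int i) { printf("undo %d\n", i); }

int main(void)
{
    int i, err;

    for (i = 0; i < 5; i++) {
        err = apply(i);
        if (err)
            goto rollback;
        printf("applied %d\n", i);
    }
    return 0;

rollback:
    while (--i >= 0)   /* walk back over the entries already applied */
        undo(i);
    return 1;
}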

net/bridge/br_if.c

@@ -85,6 +85,111 @@ void br_port_carrier_check(struct net_bridge_port *p)
spin_unlock_bh(&br->lock);
}
static void br_port_set_promisc(struct net_bridge_port *p)
{
int err = 0;
if (br_promisc_port(p))
return;
err = dev_set_promiscuity(p->dev, 1);
if (err)
return;
br_fdb_unsync_static(p->br, p);
p->flags |= BR_PROMISC;
}
static void br_port_clear_promisc(struct net_bridge_port *p)
{
int err;
/* Check if the port is already non-promisc or if it doesn't
* support UNICAST filtering. Without unicast filtering support
* we'll end up re-enabling promisc mode anyway, so just check for
* it here.
*/
if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
return;
/* Since we'll be clearing the promisc mode, program the port
* first so that we don't have interruption in traffic.
*/
err = br_fdb_sync_static(p->br, p);
if (err)
return;
dev_set_promiscuity(p->dev, -1);
p->flags &= ~BR_PROMISC;
}
/* When a port is added or removed or when certain port flags
* change, this function is called to automatically manage
* promiscuity setting of all the bridge ports. We are always called
* under RTNL so can skip using rcu primitives.
*/
void br_manage_promisc(struct net_bridge *br)
{
struct net_bridge_port *p;
bool set_all = false;
/* If vlan filtering is disabled or bridge interface is placed
* into promiscuous mode, place all ports in promiscuous mode.
*/
if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
set_all = true;
list_for_each_entry(p, &br->port_list, list) {
if (set_all) {
br_port_set_promisc(p);
} else {
/* If the number of auto-ports is <= 1, then all other
* ports will have their output configuration
* statically specified through fdbs. Since ingress
* on the auto-port becomes forwarding/egress to other
* ports and egress configuration is statically known,
* we can say that ingress configuration of the
* auto-port is also statically known.
* This lets us disable promiscuous mode and write
* this config to hw.
*/
if (br->auto_cnt == 0 ||
(br->auto_cnt == 1 && br_auto_port(p)))
br_port_clear_promisc(p);
else
br_port_set_promisc(p);
}
}
}
static void nbp_update_port_count(struct net_bridge *br)
{
struct net_bridge_port *p;
u32 cnt = 0;
list_for_each_entry(p, &br->port_list, list) {
if (br_auto_port(p))
cnt++;
}
if (br->auto_cnt != cnt) {
br->auto_cnt = cnt;
br_manage_promisc(br);
}
}
static void nbp_delete_promisc(struct net_bridge_port *p)
{
/* If port is currently promiscuous, unset promiscuity.
* Otherwise, it is a static port so remove all addresses
* from it.
*/
dev_set_allmulti(p->dev, -1);
if (br_promisc_port(p))
dev_set_promiscuity(p->dev, -1);
else
br_fdb_unsync_static(p->br, p);
}
static void release_nbp(struct kobject *kobj)
{
struct net_bridge_port *p
@@ -133,7 +238,7 @@ static void del_nbp(struct net_bridge_port *p)
sysfs_remove_link(br->ifobj, p->dev->name);
dev_set_promiscuity(dev, -1);
nbp_delete_promisc(p);
spin_lock_bh(&br->lock);
br_stp_disable_port(p);
@@ -141,10 +246,11 @@ static void del_nbp(struct net_bridge_port *p)
br_ifinfo_notify(RTM_DELLINK, p);
list_del_rcu(&p->list);
nbp_vlan_flush(p);
br_fdb_delete_by_port(br, p, 1);
list_del_rcu(&p->list);
nbp_update_port_count(br);
dev->priv_flags &= ~IFF_BRIDGE_PORT;
@@ -353,7 +459,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
call_netdevice_notifiers(NETDEV_JOIN, dev);
err = dev_set_promiscuity(dev, 1);
err = dev_set_allmulti(dev, 1);
if (err)
goto put_back;
@@ -384,6 +490,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
list_add_rcu(&p->list, &br->port_list);
nbp_update_port_count(br);
netdev_update_features(br->dev);
if (br->dev->needed_headroom < dev->needed_headroom)
@@ -421,7 +529,7 @@ err2:
kobject_put(&p->kobj);
p = NULL; /* kobject_put frees */
err1:
dev_set_promiscuity(dev, -1);
dev_set_allmulti(dev, -1);
put_back:
dev_put(dev);
kfree(p);
@@ -455,3 +563,11 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
return 0;
}
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
struct net_bridge *br = p->br;
if (mask & BR_AUTO_MASK)
nbp_update_port_count(br);
}
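
The decision logic in br_manage_promisc() reduces to one predicate per port, sketched below as a standalone function. The parameters stand in for br->dev->flags & IFF_PROMISC, br_vlan_enabled(), br->auto_cnt and br_auto_port(p); a port may leave promiscuous mode only when every other port's egress is statically known through the FDB:

#include <stdbool.h>
#include <stdio.h>

static bool can_clear_promisc(unsigned int auto_cnt, bool port_is_auto,
                              bool vlan_enabled, bool bridge_promisc)
{
    if (bridge_promisc || !vlan_enabled)
        return false;   /* every port stays promiscuous */
    return auto_cnt == 0 || (auto_cnt == 1 && port_is_auto);
}

int main(void)
{
    printf("%d\n", can_clear_promisc(1, true, true, false));   /* 1 */
    printf("%d\n", can_clear_promisc(2, true, true, false));   /* 0 */
    return 0;
}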

net/bridge/br_input.c

@@ -177,6 +177,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
p = br_port_get_rcu(skb->dev);
if (unlikely(is_link_local_ether_addr(dest))) {
u16 fwd_mask = p->br->group_fwd_mask_required;
/*
* See IEEE 802.1D Table 7-10 Reserved addresses
*
@@ -194,7 +196,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
case 0x00: /* Bridge Group Address */
/* If STP is turned off,
then must forward to keep loop detection */
if (p->br->stp_enabled == BR_NO_STP)
if (p->br->stp_enabled == BR_NO_STP ||
fwd_mask & (1u << dest[5]))
goto forward;
break;
@@ -203,7 +206,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
default:
/* Allow selective forwarding for most other protocols */
if (p->br->group_fwd_mask & (1u << dest[5]))
fwd_mask |= p->br->group_fwd_mask;
if (fwd_mask & (1u << dest[5]))
goto forward;
}
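
In the link-local filter, bit N of fwd_mask corresponds to the reserved address 01:80:C2:00:00:0N. A standalone sketch that expands the new BR_GROUPFWD_8021AD mask (defined in br_private.h below) into the Nearest Customer Bridge addresses it forwards:

#include <stdio.h>

#define BR_GROUPFWD_8021AD 0xB801u

int main(void)
{
    unsigned int last;

    for (last = 0; last < 16; last++)
        if (BR_GROUPFWD_8021AD & (1u << last))
            printf("forward 01:80:c2:00:00:%02x\n", last);
    /* prints the octets 00, 0b, 0c, 0d and 0f */
    return 0;
}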

net/bridge/br_mdb.c

@@ -418,13 +418,13 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP)) {
if (timer_pending(&br->ip4_querier.timer))
if (timer_pending(&br->ip4_other_query.timer))
return -EBUSY;
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (timer_pending(&br->ip6_querier.timer))
if (timer_pending(&br->ip6_other_query.timer))
return -EBUSY;
ip.u.ip6 = entry->addr.u.ip6;

net/bridge/br_multicast.c

@@ -11,6 +11,7 @@
*/
#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
@@ -35,7 +36,7 @@
#include "br_private.h"
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_query *query);
struct bridge_mcast_own_query *query);
unsigned int br_mdb_rehash_seq;
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -761,7 +762,7 @@ static void br_multicast_local_router_expired(unsigned long data)
}
static void br_multicast_querier_expired(struct net_bridge *br,
struct bridge_mcast_query *query)
struct bridge_mcast_own_query *query)
{
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || br->multicast_disabled)
@@ -777,7 +778,7 @@ static void br_ip4_multicast_querier_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_querier_expired(br, &br->ip4_query);
br_multicast_querier_expired(br, &br->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -785,10 +786,22 @@ static void br_ip6_multicast_querier_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_querier_expired(br, &br->ip6_query);
br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
static void br_multicast_select_own_querier(struct net_bridge *br,
struct br_ip *ip,
struct sk_buff *skb)
{
if (ip->proto == htons(ETH_P_IP))
br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
else
br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
static void __br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *ip)
@@ -804,17 +817,19 @@ static void __br_multicast_send_query(struct net_bridge *br,
skb->dev = port->dev;
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
} else
} else {
br_multicast_select_own_querier(br, ip, skb);
netif_rx(skb);
}
}
static void br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
struct bridge_mcast_query *query)
struct bridge_mcast_own_query *own_query)
{
unsigned long time;
struct br_ip br_group;
struct bridge_mcast_querier *querier = NULL;
struct bridge_mcast_other_query *other_query = NULL;
if (!netif_running(br->dev) || br->multicast_disabled ||
!br->multicast_querier)
@@ -822,31 +837,32 @@ static void br_multicast_send_query(struct net_bridge *br,
memset(&br_group.u, 0, sizeof(br_group.u));
if (port ? (query == &port->ip4_query) :
(query == &br->ip4_query)) {
querier = &br->ip4_querier;
if (port ? (own_query == &port->ip4_own_query) :
(own_query == &br->ip4_own_query)) {
other_query = &br->ip4_other_query;
br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
querier = &br->ip6_querier;
other_query = &br->ip6_other_query;
br_group.proto = htons(ETH_P_IPV6);
#endif
}
if (!querier || timer_pending(&querier->timer))
if (!other_query || timer_pending(&other_query->timer))
return;
__br_multicast_send_query(br, port, &br_group);
time = jiffies;
time += query->startup_sent < br->multicast_startup_query_count ?
time += own_query->startup_sent < br->multicast_startup_query_count ?
br->multicast_startup_query_interval :
br->multicast_query_interval;
mod_timer(&query->timer, time);
mod_timer(&own_query->timer, time);
}
static void br_multicast_port_query_expired(struct net_bridge_port *port,
struct bridge_mcast_query *query)
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
struct bridge_mcast_own_query *query)
{
struct net_bridge *br = port->br;
@@ -868,7 +884,7 @@ static void br_ip4_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
br_multicast_port_query_expired(port, &port->ip4_query);
br_multicast_port_query_expired(port, &port->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -876,7 +892,7 @@ static void br_ip6_multicast_port_query_expired(unsigned long data)
{
struct net_bridge_port *port = (void *)data;
br_multicast_port_query_expired(port, &port->ip6_query);
br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
@@ -886,11 +902,11 @@ void br_multicast_add_port(struct net_bridge_port *port)
setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
(unsigned long)port);
setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
(unsigned long)port);
setup_timer(&port->ip4_own_query.timer,
br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
(unsigned long)port);
setup_timer(&port->ip6_own_query.timer,
br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
}
@@ -899,7 +915,7 @@ void br_multicast_del_port(struct net_bridge_port *port)
del_timer_sync(&port->multicast_router_timer);
}
static void br_multicast_enable(struct bridge_mcast_query *query)
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
query->startup_sent = 0;
@@ -916,9 +932,9 @@ void br_multicast_enable_port(struct net_bridge_port *port)
if (br->multicast_disabled || !netif_running(br->dev))
goto out;
br_multicast_enable(&port->ip4_query);
br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_enable(&port->ip6_query);
br_multicast_enable(&port->ip6_own_query);
#endif
out:
@@ -938,9 +954,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
if (!hlist_unhashed(&port->rlist))
hlist_del_init_rcu(&port->rlist);
del_timer(&port->multicast_router_timer);
del_timer(&port->ip4_query.timer);
del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer(&port->ip6_query.timer);
del_timer(&port->ip6_own_query.timer);
#endif
spin_unlock(&br->multicast_lock);
}
@@ -1064,15 +1080,80 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
}
#endif
static void
br_multicast_update_querier_timer(struct net_bridge *br,
struct bridge_mcast_querier *querier,
unsigned long max_delay)
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
struct net_bridge_port *port,
__be32 saddr)
{
if (!timer_pending(&querier->timer))
querier->delay_time = jiffies + max_delay;
if (!timer_pending(&br->ip4_own_query.timer) &&
!timer_pending(&br->ip4_other_query.timer))
goto update;
mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
if (!br->ip4_querier.addr.u.ip4)
goto update;
if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
goto update;
return false;
update:
br->ip4_querier.addr.u.ip4 = saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip4_querier.port, port);
return true;
}
#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
struct net_bridge_port *port,
struct in6_addr *saddr)
{
if (!timer_pending(&br->ip6_own_query.timer) &&
!timer_pending(&br->ip6_other_query.timer))
goto update;
if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
goto update;
return false;
update:
br->ip6_querier.addr.u.ip6 = *saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip6_querier.port, port);
return true;
}
#endif
static bool br_multicast_select_querier(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *saddr)
{
switch (saddr->proto) {
case htons(ETH_P_IP):
return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
}
return false;
}
static void
br_multicast_update_query_timer(struct net_bridge *br,
struct bridge_mcast_other_query *query,
unsigned long max_delay)
{
if (!timer_pending(&query->timer))
query->delay_time = jiffies + max_delay;
mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
/*
@@ -1125,16 +1206,14 @@ timer:
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
struct bridge_mcast_querier *querier,
int saddr,
bool is_general_query,
struct bridge_mcast_other_query *query,
struct br_ip *saddr,
unsigned long max_delay)
{
if (saddr && is_general_query)
br_multicast_update_querier_timer(br, querier, max_delay);
else if (timer_pending(&querier->timer))
if (!br_multicast_select_querier(br, port, saddr))
return;
br_multicast_update_query_timer(br, query, max_delay);
br_multicast_mark_router(br, port);
}
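
The election rule in br_ip4_multicast_select_querier() follows IGMP querier election: of two candidate queriers, the one with the numerically lower source address wins, compared in host byte order. A standalone sketch with illustrative addresses:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t cur, cand;

    inet_pton(AF_INET, "192.168.1.20", &cur);   /* currently selected querier */
    inet_pton(AF_INET, "192.168.1.5", &cand);   /* source of a newly seen query */

    if (ntohl(cand) <= ntohl(cur))
        printf("candidate takes over as querier\n");
    else
        printf("current querier is kept\n");
    return 0;
}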
@@ -1149,6 +1228,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
struct igmpv3_query *ih3;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
__be32 group;
@@ -1190,11 +1270,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
goto out;
}
br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
!group, max_delay);
if (!group) {
saddr.proto = htons(ETH_P_IP);
saddr.u.ip4 = iph->saddr;
if (!group)
br_multicast_query_received(br, port, &br->ip4_other_query,
&saddr, max_delay);
goto out;
}
mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
if (!mp)
@@ -1234,6 +1317,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
struct mld2_query *mld2q;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip saddr;
unsigned long max_delay;
unsigned long now = jiffies;
const struct in6_addr *group = NULL;
@@ -1282,12 +1366,16 @@ static int br_ip6_multicast_query(struct net_bridge *br,
goto out;
}
br_multicast_query_received(br, port, &br->ip6_querier,
!ipv6_addr_any(&ip6h->saddr),
is_general_query, max_delay);
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
saddr.u.ip6 = ip6h->saddr;
if (!group)
br_multicast_query_received(br, port, &br->ip6_other_query,
&saddr, max_delay);
goto out;
} else if (!group) {
goto out;
}
mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
if (!mp)
@@ -1315,11 +1403,12 @@ out:
}
#endif
static void br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *group,
struct bridge_mcast_querier *querier,
struct bridge_mcast_query *query)
static void
br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *group,
struct bridge_mcast_other_query *other_query,
struct bridge_mcast_own_query *own_query)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
@@ -1330,7 +1419,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) ||
timer_pending(&querier->timer))
timer_pending(&other_query->timer))
goto out;
mdb = mlock_dereference(br->mdb, br);
@@ -1344,7 +1433,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
time = jiffies + br->multicast_last_member_count *
br->multicast_last_member_interval;
mod_timer(&query->timer, time);
mod_timer(&own_query->timer, time);
for (p = mlock_dereference(mp->ports, br);
p != NULL;
@@ -1425,17 +1514,19 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
__u16 vid)
{
struct br_ip br_group;
struct bridge_mcast_query *query = port ? &port->ip4_query :
&br->ip4_query;
struct bridge_mcast_own_query *own_query;
if (ipv4_is_local_multicast(group))
return;
own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
br_group.u.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1445,18 +1536,19 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
__u16 vid)
{
struct br_ip br_group;
struct bridge_mcast_query *query = port ? &port->ip6_query :
&br->ip6_query;
struct bridge_mcast_own_query *own_query;
if (ipv6_addr_is_ll_all_nodes(group))
return;
own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
own_query);
}
#endif
@@ -1723,12 +1815,14 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
}
static void br_multicast_query_expired(struct net_bridge *br,
struct bridge_mcast_query *query)
struct bridge_mcast_own_query *query,
struct bridge_mcast_querier *querier)
{
spin_lock(&br->multicast_lock);
if (query->startup_sent < br->multicast_startup_query_count)
query->startup_sent++;
rcu_assign_pointer(querier, NULL);
br_multicast_send_query(br, NULL, query);
spin_unlock(&br->multicast_lock);
}
@@ -1737,7 +1831,7 @@ static void br_ip4_multicast_query_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_query_expired(br, &br->ip4_query);
br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1745,7 +1839,7 @@ static void br_ip6_multicast_query_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
br_multicast_query_expired(br, &br->ip6_query);
br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif
@@ -1767,28 +1861,30 @@ void br_multicast_init(struct net_bridge *br)
br->multicast_querier_interval = 255 * HZ;
br->multicast_membership_interval = 260 * HZ;
br->ip4_querier.delay_time = 0;
br->ip4_other_query.delay_time = 0;
br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
br->ip6_querier.delay_time = 0;
br->ip6_other_query.delay_time = 0;
br->ip6_querier.port = NULL;
#endif
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0);
setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
(unsigned long)br);
setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
setup_timer(&br->ip4_other_query.timer,
br_ip4_multicast_querier_expired, (unsigned long)br);
setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
(unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
(unsigned long)br);
setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
setup_timer(&br->ip6_other_query.timer,
br_ip6_multicast_querier_expired, (unsigned long)br);
setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
(unsigned long)br);
#endif
}
static void __br_multicast_open(struct net_bridge *br,
struct bridge_mcast_query *query)
struct bridge_mcast_own_query *query)
{
query->startup_sent = 0;
@@ -1800,9 +1896,9 @@ static void __br_multicast_open(struct net_bridge *br,
void br_multicast_open(struct net_bridge *br)
{
__br_multicast_open(br, &br->ip4_query);
__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
__br_multicast_open(br, &br->ip6_query);
__br_multicast_open(br, &br->ip6_own_query);
#endif
}
@@ -1815,11 +1911,11 @@ void br_multicast_stop(struct net_bridge *br)
int i;
del_timer_sync(&br->multicast_router_timer);
del_timer_sync(&br->ip4_querier.timer);
del_timer_sync(&br->ip4_query.timer);
del_timer_sync(&br->ip4_other_query.timer);
del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&br->ip6_querier.timer);
del_timer_sync(&br->ip6_query.timer);
del_timer_sync(&br->ip6_other_query.timer);
del_timer_sync(&br->ip6_own_query.timer);
#endif
spin_lock_bh(&br->multicast_lock);
@@ -1923,7 +2019,7 @@ unlock:
}
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_query *query)
struct bridge_mcast_own_query *query)
{
struct net_bridge_port *port;
@@ -1934,11 +2030,11 @@ static void br_multicast_start_querier(struct net_bridge *br,
port->state == BR_STATE_BLOCKING)
continue;
if (query == &br->ip4_query)
br_multicast_enable(&port->ip4_query);
if (query == &br->ip4_own_query)
br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
else
br_multicast_enable(&port->ip6_query);
br_multicast_enable(&port->ip6_own_query);
#endif
}
}
@@ -1974,9 +2070,9 @@ rollback:
goto rollback;
}
br_multicast_start_querier(br, &br->ip4_query);
br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
br_multicast_start_querier(br, &br->ip6_query);
br_multicast_start_querier(br, &br->ip6_own_query);
#endif
unlock:
@@ -2001,16 +2097,16 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
max_delay = br->multicast_query_response_interval;
if (!timer_pending(&br->ip4_querier.timer))
br->ip4_querier.delay_time = jiffies + max_delay;
if (!timer_pending(&br->ip4_other_query.timer))
br->ip4_other_query.delay_time = jiffies + max_delay;
br_multicast_start_querier(br, &br->ip4_query);
br_multicast_start_querier(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
if (!timer_pending(&br->ip6_querier.timer))
br->ip6_querier.delay_time = jiffies + max_delay;
if (!timer_pending(&br->ip6_other_query.timer))
br->ip6_other_query.delay_time = jiffies + max_delay;
br_multicast_start_querier(br, &br->ip6_query);
br_multicast_start_querier(br, &br->ip6_own_query);
#endif
unlock:
@@ -2061,3 +2157,109 @@ unlock:
return err;
}
/**
* br_multicast_list_adjacent - Returns snooped multicast addresses
* @dev: The bridge port adjacent to which to retrieve addresses
* @br_ip_list: The list to store found, snooped multicast IP addresses in
*
* Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
* snooping feature on all bridge ports of dev's bridge device, excluding
* the addresses from dev itself.
*
* Returns the number of items added to br_ip_list.
*
* Notes:
* - br_ip_list needs to be initialized by caller
* - br_ip_list might contain duplicates in the end
* (needs to be taken care of by caller)
* - br_ip_list needs to be freed by caller
*/
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
{
struct net_bridge *br;
struct net_bridge_port *port;
struct net_bridge_port_group *group;
struct br_ip_list *entry;
int count = 0;
rcu_read_lock();
if (!br_ip_list || !br_port_exists(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
list_for_each_entry_rcu(port, &br->port_list, list) {
if (!port->dev || port->dev == dev)
continue;
hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
goto unlock;
entry->addr = group->addr;
list_add(&entry->list, br_ip_list);
count++;
}
}
unlock:
rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
/**
* br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
* @dev: The bridge port adjacent to which to check for a querier
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
*
* Checks whether the given interface has a bridge on top and if so returns
* true if a selected querier is behind one of the other ports of this
* bridge. Otherwise returns false.
*/
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
struct net_bridge *br;
struct net_bridge_port *port;
bool ret = false;
rcu_read_lock();
if (!br_port_exists(dev))
goto unlock;
port = br_port_get_rcu(dev);
if (!port || !port->br)
goto unlock;
br = port->br;
switch (proto) {
case ETH_P_IP:
if (!timer_pending(&br->ip4_other_query.timer) ||
rcu_dereference(br->ip4_querier.port) == port)
goto unlock;
break;
#if IS_ENABLED(CONFIG_IPV6)
case ETH_P_IPV6:
if (!timer_pending(&br->ip6_other_query.timer) ||
rcu_dereference(br->ip6_querier.port) == port)
goto unlock;
break;
#endif
default:
goto unlock;
}
ret = true;
unlock:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

net/bridge/br_netfilter.c

@@ -535,7 +535,7 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
return br;
vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
vlan_tx_tag_get(skb) & VLAN_VID_MASK);
return vlan ? vlan : br;

net/bridge/br_netlink.c

@@ -328,6 +328,7 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
int err;
unsigned long old_flags = p->flags;
br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
@@ -353,6 +354,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
if (err)
return err;
}
br_port_flags_change(p, old_flags ^ p->flags);
return 0;
}
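
br_setport() now derives the set of changed bits with XOR before notifying br_port_flags_change(). A one-line trick worth spelling out; BR_LEARNING (0x20) and BR_FLOOD (0x40) are the flag values from br_private.h below:

#include <stdio.h>

#define BR_LEARNING 0x20
#define BR_FLOOD    0x40

int main(void)
{
    unsigned long old_flags = BR_LEARNING;
    unsigned long new_flags = BR_LEARNING | BR_FLOOD;
    unsigned long changed   = old_flags ^ new_flags;

    printf("changed mask: %#lx\n", changed);   /* 0x40: only BR_FLOOD flipped */
    return 0;
}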

net/bridge/br_notify.c (deleted; the handler moved into net/bridge/br.c above)

@@ -1,118 +0,0 @@
/*
* Device event handling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include "br_private.h"
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
/*
* Handle changes in state of network devices enslaved to a bridge.
*
* Note: don't care about up/down if bridge itself is down, because
* port state is checked when bridge is brought up.
*/
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
bool changed_addr;
int err;
/* register of bridge completed, add sysfs entries */
if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
br_sysfs_addbr(dev);
return NOTIFY_DONE;
}
/* not a port of a bridge */
p = br_port_get_rtnl(dev);
if (!p)
return NOTIFY_DONE;
br = p->br;
switch (event) {
case NETDEV_CHANGEMTU:
dev_set_mtu(br->dev, br_min_mtu(br));
break;
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
break;
case NETDEV_CHANGE:
br_port_carrier_check(p);
break;
case NETDEV_FEAT_CHANGE:
netdev_update_features(br->dev);
break;
case NETDEV_DOWN:
spin_lock_bh(&br->lock);
if (br->dev->flags & IFF_UP)
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
break;
case NETDEV_UP:
if (netif_running(br->dev) && netif_oper_up(dev)) {
spin_lock_bh(&br->lock);
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
}
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
case NETDEV_CHANGENAME:
err = br_sysfs_renameif(p);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, br->dev);
break;
}
/* Events that may cause spanning tree to refresh */
if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
event == NETDEV_CHANGE || event == NETDEV_DOWN)
br_ifinfo_notify(RTM_NEWLINK, p);
return NOTIFY_DONE;
}

net/bridge/br_private.h

@@ -35,6 +35,8 @@
#define BR_GROUPFWD_DEFAULT 0
/* Don't allow forwarding control protocols like STP and LLDP */
#define BR_GROUPFWD_RESTRICTED 0x4007u
/* The Nearest Customer Bridge Group Address, 01-80-C2-00-00-[00,0B,0C,0D,0F] */
#define BR_GROUPFWD_8021AD 0xB801u
/* Path to usermode spanning tree program */
#define BR_STP_PROG "/sbin/bridge-stp"
@@ -54,30 +56,24 @@ struct mac_addr
unsigned char addr[ETH_ALEN];
};
struct br_ip
{
union {
__be32 ip4;
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
} u;
__be16 proto;
__u16 vid;
};
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* our own querier */
struct bridge_mcast_query {
struct bridge_mcast_own_query {
struct timer_list timer;
u32 startup_sent;
};
/* other querier */
struct bridge_mcast_querier {
struct bridge_mcast_other_query {
struct timer_list timer;
unsigned long delay_time;
};
/* selected querier */
struct bridge_mcast_querier {
struct br_ip addr;
struct net_bridge_port __rcu *port;
};
#endif
struct net_port_vlans {
@@ -174,11 +170,13 @@ struct net_bridge_port
#define BR_ADMIN_COST 0x00000010
#define BR_LEARNING 0x00000020
#define BR_FLOOD 0x00000040
#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
#define BR_PROMISC 0x00000080
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct bridge_mcast_query ip4_query;
struct bridge_mcast_own_query ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_query ip6_query;
struct bridge_mcast_own_query ip6_own_query;
#endif /* IS_ENABLED(CONFIG_IPV6) */
unsigned char multicast_router;
struct timer_list multicast_router_timer;
@@ -198,6 +196,9 @@ struct net_bridge_port
#endif
};
#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
#define br_promisc_port(p) ((p)->flags & BR_PROMISC)
#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
@@ -227,6 +228,7 @@ struct net_bridge
bool nf_call_arptables;
#endif
u16 group_fwd_mask;
u16 group_fwd_mask_required;
/* STP */
bridge_id designated_root;
@@ -241,6 +243,7 @@ struct net_bridge
unsigned long bridge_forward_delay;
u8 group_addr[ETH_ALEN];
bool group_addr_set;
u16 root_port;
enum {
@@ -277,11 +280,13 @@ struct net_bridge
struct hlist_head router_list;
struct timer_list multicast_router_timer;
struct bridge_mcast_other_query ip4_other_query;
struct bridge_mcast_own_query ip4_own_query;
struct bridge_mcast_querier ip4_querier;
struct bridge_mcast_query ip4_query;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_other_query ip6_other_query;
struct bridge_mcast_own_query ip6_own_query;
struct bridge_mcast_querier ip6_querier;
struct bridge_mcast_query ip6_query;
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif
@@ -290,8 +295,10 @@ struct net_bridge
struct timer_list topology_change_timer;
struct timer_list gc_timer;
struct kobject *ifobj;
u32 auto_cnt;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
u8 vlan_enabled;
__be16 vlan_proto;
struct net_port_vlans __rcu *vlan_info;
#endif
};
@@ -327,8 +334,6 @@ struct br_input_skb_cb {
#define br_debug(br, format, args...) \
pr_debug("%s: " format, (br)->dev->name, ##args)
extern struct notifier_block br_device_notifier;
/* called under bridge lock */
static inline int br_is_root_bridge(const struct net_bridge *br)
{
@@ -395,6 +400,8 @@ int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
const unsigned char *addr, u16 nlh_flags);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, int idx);
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
/* br_forward.c */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
@@ -415,6 +422,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev);
int br_min_mtu(const struct net_bridge *br);
netdev_features_t br_features_recompute(struct net_bridge *br,
netdev_features_t features);
void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
void br_manage_promisc(struct net_bridge *br);
/* br_input.c */
int br_handle_frame_finish(struct sk_buff *skb);
@@ -485,7 +494,7 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
static inline bool
__br_multicast_querier_exists(struct net_bridge *br,
struct bridge_mcast_querier *querier)
struct bridge_mcast_other_query *querier)
{
return time_is_before_jiffies(querier->delay_time) &&
(br->multicast_querier || timer_pending(&querier->timer));
@@ -496,10 +505,10 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
return __br_multicast_querier_exists(br, &br->ip4_querier);
return __br_multicast_querier_exists(br, &br->ip4_other_query);
#if IS_ENABLED(CONFIG_IPV6)
case (htons(ETH_P_IPV6)):
return __br_multicast_querier_exists(br, &br->ip6_querier);
return __br_multicast_querier_exists(br, &br->ip6_other_query);
#endif
default:
return false;
@@ -589,7 +598,10 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
bool br_vlan_find(struct net_bridge *br, u16 vid);
void br_recalculate_fwd_mask(struct net_bridge *br);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
void br_vlan_init(struct net_bridge *br);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
void nbp_vlan_flush(struct net_bridge_port *port);
@@ -633,6 +645,10 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
return v->pvid ?: VLAN_N_VID;
}
static inline int br_vlan_enabled(struct net_bridge *br)
{
return br->vlan_enabled;
}
#else
static inline bool br_allowed_ingress(struct net_bridge *br,
struct net_port_vlans *v,
@@ -681,6 +697,14 @@ static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
return false;
}
static inline void br_recalculate_fwd_mask(struct net_bridge *br)
{
}
static inline void br_vlan_init(struct net_bridge *br)
{
}
static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
return -EOPNOTSUPP;
@@ -719,6 +743,11 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
{
return VLAN_N_VID; /* Returns invalid vid */
}
static inline int br_vlan_enabled(struct net_bridge *br)
{
return 0;
}
#endif
/* br_netfilter.c */
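
The header keeps callers of br_vlan_enabled() and friends free of #ifdef clutter by pairing every CONFIG_BRIDGE_VLAN_FILTERING function with a no-op stub selected at compile time. A compilable miniature of the idiom, with struct net_bridge reduced to the one field used:

#include <stdio.h>

#define CONFIG_BRIDGE_VLAN_FILTERING   /* comment out to get the stub */

struct net_bridge { int vlan_enabled; };

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
static int br_vlan_enabled(struct net_bridge *br) { return br->vlan_enabled; }
#else
static int br_vlan_enabled(struct net_bridge *br) { (void)br; return 0; }
#endif

int main(void)
{
    struct net_bridge br = { .vlan_enabled = 1 };

    printf("%d\n", br_vlan_enabled(&br));
    return 0;
}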

net/bridge/br_sysfs_br.c

@@ -312,10 +312,19 @@ static ssize_t group_addr_store(struct device *d,
new_addr[5] == 3) /* 802.1X PAE address */
return -EINVAL;
if (!rtnl_trylock())
return restart_syscall();
spin_lock_bh(&br->lock);
for (i = 0; i < 6; i++)
br->group_addr[i] = new_addr[i];
spin_unlock_bh(&br->lock);
br->group_addr_set = true;
br_recalculate_fwd_mask(br);
rtnl_unlock();
return len;
}
@@ -700,6 +709,22 @@ static ssize_t vlan_filtering_store(struct device *d,
return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
}
static DEVICE_ATTR_RW(vlan_filtering);
static ssize_t vlan_protocol_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%#06x\n", ntohs(br->vlan_proto));
}
static ssize_t vlan_protocol_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_set_proto);
}
static DEVICE_ATTR_RW(vlan_protocol);
#endif
static struct attribute *bridge_attrs[] = {
@@ -745,6 +770,7 @@ static struct attribute *bridge_attrs[] = {
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
&dev_attr_vlan_filtering.attr,
&dev_attr_vlan_protocol.attr,
#endif
NULL
};
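
From userspace the new attribute appears as /sys/class/net/<bridge>/bridge/vlan_protocol: reading it yields 0x8100 or 0x88a8, and writing either value switches the bridge between 802.1Q and 802.1ad. A minimal reader, with "br0" as a placeholder bridge name:

#include <stdio.h>

int main(void)
{
    char buf[16];
    FILE *f = fopen("/sys/class/net/br0/bridge/vlan_protocol", "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("vlan_protocol: %s", buf);   /* e.g. "0x8100" */
    fclose(f);
    return 0;
}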

net/bridge/br_sysfs_if.c

@@ -41,20 +41,30 @@ static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
} \
static int store_##_name(struct net_bridge_port *p, unsigned long v) \
{ \
unsigned long flags = p->flags; \
if (v) \
flags |= _mask; \
else \
flags &= ~_mask; \
if (flags != p->flags) { \
p->flags = flags; \
br_ifinfo_notify(RTM_NEWLINK, p); \
} \
return 0; \
return store_flag(p, v, _mask); \
} \
static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \
show_##_name, store_##_name)
static int store_flag(struct net_bridge_port *p, unsigned long v,
unsigned long mask)
{
unsigned long flags;
flags = p->flags;
if (v)
flags |= mask;
else
flags &= ~mask;
if (flags != p->flags) {
p->flags = flags;
br_port_flags_change(p, mask);
br_ifinfo_notify(RTM_NEWLINK, p);
}
return 0;
}
static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
{

net/bridge/br_vlan.c

@@ -60,7 +60,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
* that ever changes this code will allow tagged
* traffic to enter the bridge.
*/
err = vlan_vid_add(dev, htons(ETH_P_8021Q), vid);
err = vlan_vid_add(dev, br->vlan_proto, vid);
if (err)
return err;
}
@@ -80,7 +80,7 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
out_filt:
if (p)
vlan_vid_del(dev, htons(ETH_P_8021Q), vid);
vlan_vid_del(dev, br->vlan_proto, vid);
return err;
}
@@ -92,8 +92,10 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
__vlan_delete_pvid(v, vid);
clear_bit(vid, v->untagged_bitmap);
if (v->port_idx)
vlan_vid_del(v->parent.port->dev, htons(ETH_P_8021Q), vid);
if (v->port_idx) {
struct net_bridge_port *p = v->parent.port;
vlan_vid_del(p->dev, p->br->vlan_proto, vid);
}
clear_bit(vid, v->vlan_bitmap);
v->num_vlans--;
@@ -158,7 +160,8 @@ out:
bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
struct sk_buff *skb, u16 *vid)
{
int err;
bool tagged;
__be16 proto;
/* If VLAN filtering is disabled on the bridge, all packets are
* permitted.
@@ -172,19 +175,41 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
if (!v)
goto drop;
proto = br->vlan_proto;
/* If vlan tx offload is disabled on bridge device and frame was
* sent from vlan device on the bridge device, it does not have
* HW accelerated vlan tag.
*/
if (unlikely(!vlan_tx_tag_present(skb) &&
(skb->protocol == htons(ETH_P_8021Q) ||
skb->protocol == htons(ETH_P_8021AD)))) {
skb->protocol == proto)) {
skb = vlan_untag(skb);
if (unlikely(!skb))
return false;
}
err = br_vlan_get_tag(skb, vid);
if (!br_vlan_get_tag(skb, vid)) {
/* Tagged frame */
if (skb->vlan_proto != proto) {
/* Protocol mismatch; empty out vlan_tci for the new tag */
skb_push(skb, ETH_HLEN);
skb = __vlan_put_tag(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
if (unlikely(!skb))
return false;
skb_pull(skb, ETH_HLEN);
skb_reset_mac_len(skb);
*vid = 0;
tagged = false;
} else {
tagged = true;
}
} else {
/* Untagged frame */
tagged = false;
}
if (!*vid) {
u16 pvid = br_get_pvid(v);
@@ -199,9 +224,9 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
* ingress frame is considered to belong to this vlan.
*/
*vid = pvid;
if (likely(err))
if (likely(!tagged))
/* Untagged Frame. */
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
__vlan_hwaccel_put_tag(skb, proto, pvid);
else
/* Priority-tagged Frame.
* At this point, We know that skb->vlan_tci had
@@ -254,7 +279,9 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
if (!v)
return false;
br_vlan_get_tag(skb, vid);
if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
*vid = 0;
if (!*vid) {
*vid = br_get_pvid(v);
if (*vid == VLAN_N_VID)
@@ -351,6 +378,33 @@ out:
return found;
}
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
if (br->group_addr_set)
return;
spin_lock_bh(&br->lock);
if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
/* Bridge Group Address */
br->group_addr[5] = 0x00;
} else { /* vlan_enabled && ETH_P_8021AD */
/* Provider Bridge Group Address */
br->group_addr[5] = 0x08;
}
spin_unlock_bh(&br->lock);
}
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
else /* vlan_enabled && ETH_P_8021AD */
br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
~(1u << br->group_addr[5]);
}
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
if (!rtnl_trylock())
@@ -360,12 +414,88 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
goto unlock;
br->vlan_enabled = val;
br_manage_promisc(br);
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
unlock:
rtnl_unlock();
return 0;
}
int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
int err = 0;
struct net_bridge_port *p;
struct net_port_vlans *pv;
__be16 proto, oldproto;
u16 vid, errvid;
if (val != ETH_P_8021Q && val != ETH_P_8021AD)
return -EPROTONOSUPPORT;
if (!rtnl_trylock())
return restart_syscall();
proto = htons(val);
if (br->vlan_proto == proto)
goto unlock;
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
pv = rtnl_dereference(p->vlan_info);
if (!pv)
continue;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err = vlan_vid_add(p->dev, proto, vid);
if (err)
goto err_filt;
}
}
oldproto = br->vlan_proto;
br->vlan_proto = proto;
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
/* Delete VLANs for the old proto from the device filter. */
list_for_each_entry(p, &br->port_list, list) {
pv = rtnl_dereference(p->vlan_info);
if (!pv)
continue;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
vlan_vid_del(p->dev, oldproto, vid);
}
unlock:
rtnl_unlock();
return err;
err_filt:
errvid = vid;
for_each_set_bit(vid, pv->vlan_bitmap, errvid)
vlan_vid_del(p->dev, proto, vid);
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
pv = rtnl_dereference(p->vlan_info);
if (!pv)
continue;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
vlan_vid_del(p->dev, proto, vid);
}
goto unlock;
}
void br_vlan_init(struct net_bridge *br)
{
br->vlan_proto = htons(ETH_P_8021Q);
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
@@ -432,7 +562,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
return;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
vlan_vid_del(port->dev, htons(ETH_P_8021Q), vid);
vlan_vid_del(port->dev, port->br->vlan_proto, vid);
__vlan_flush(pv);
}


@@ -2,14 +2,23 @@
# Bridge netfilter configuration
#
#
config NF_TABLES_BRIDGE
depends on NF_TABLES
menuconfig NF_TABLES_BRIDGE
depends on BRIDGE && NETFILTER && NF_TABLES
tristate "Ethernet Bridge nf_tables support"
if NF_TABLES_BRIDGE
config NFT_BRIDGE_META
tristate "Netfilter nf_table bridge meta support"
depends on NFT_META
help
Add support for bridge dedicated meta key.
endif # NF_TABLES_BRIDGE
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER
select NETFILTER_XTABLES
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
help
ebtables is a general, extensible frame/packet identification
framework. Say 'Y' or 'M' here if you want to do Ethernet


@@ -3,6 +3,7 @@
#
obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o
obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o


@@ -0,0 +1,139 @@
/*
* Copyright (c) 2014 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_meta.h>
#include "../br_private.h"
static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1],
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct net_device *in = pkt->in, *out = pkt->out;
struct nft_data *dest = &data[priv->dreg];
const struct net_bridge_port *p;
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
goto err;
break;
case NFT_META_BRI_OIFNAME:
if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
goto err;
break;
default:
goto out;
}
strncpy((char *)dest->data, p->br->dev->name, sizeof(dest->data));
return;
out:
return nft_meta_get_eval(expr, data, pkt);
err:
data[NFT_REG_VERDICT].verdict = NFT_BREAK;
}
static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
case NFT_META_BRI_OIFNAME:
break;
default:
return nft_meta_get_init(ctx, expr, tb);
}
priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
err = nft_validate_output_register(priv->dreg);
if (err < 0)
return err;
err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
if (err < 0)
return err;
return 0;
}
static struct nft_expr_type nft_meta_bridge_type;
static const struct nft_expr_ops nft_meta_bridge_get_ops = {
.type = &nft_meta_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_bridge_get_eval,
.init = nft_meta_bridge_get_init,
.dump = nft_meta_get_dump,
};
static const struct nft_expr_ops nft_meta_bridge_set_ops = {
.type = &nft_meta_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_set_eval,
.init = nft_meta_set_init,
.dump = nft_meta_set_dump,
};
static const struct nft_expr_ops *
nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_META_KEY] == NULL)
return ERR_PTR(-EINVAL);
if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
return ERR_PTR(-EINVAL);
if (tb[NFTA_META_DREG])
return &nft_meta_bridge_get_ops;
if (tb[NFTA_META_SREG])
return &nft_meta_bridge_set_ops;
return ERR_PTR(-EINVAL);
}
static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
.family = NFPROTO_BRIDGE,
.name = "meta",
.select_ops = &nft_meta_bridge_select_ops,
.policy = nft_meta_policy,
.maxattr = NFTA_META_MAX,
.owner = THIS_MODULE,
};
static int __init nft_meta_bridge_module_init(void)
{
return nft_register_expr(&nft_meta_bridge_type);
}
static void __exit nft_meta_bridge_module_exit(void)
{
nft_unregister_expr(&nft_meta_bridge_type);
}
module_init(nft_meta_bridge_module_init);
module_exit(nft_meta_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");


@@ -337,6 +337,29 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
return (struct dev_rcv_lists *)dev->ml_priv;
}
/**
* effhash - hash function for 29 bit CAN identifier reduction
* @can_id: 29 bit CAN identifier
*
* Description:
* To reduce the linear traversal in one linked list of _single_ EFF CAN
* frame subscriptions, the 29 bit identifier is mapped to 10 bits.
* (see CAN_EFF_RCV_HASH_BITS definition)
*
* Return:
* Hash value from 0x000 - 0x3FF (enforced by CAN_EFF_RCV_HASH_BITS mask)
*/
static unsigned int effhash(canid_t can_id)
{
unsigned int hash;
hash = can_id;
hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}
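A quick way to sanity-check the fold is to compile it in isolation; a minimal userspace sketch follows, assuming only the 10-bit CAN_EFF_RCV_HASH_BITS value introduced in this series:

/* Standalone sketch of the effhash() reduction above; the 10-bit
 * CAN_EFF_RCV_HASH_BITS value is taken from this patch, everything
 * else is illustrative userspace scaffolding.
 */
#include <stdio.h>

typedef unsigned int canid_t;
#define CAN_EFF_RCV_HASH_BITS 10

static unsigned int effhash(canid_t can_id)
{
	unsigned int hash;

	hash = can_id;
	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}

int main(void)
{
	/* any valid 29-bit extended identifier maps into 0x000..0x3ff */
	printf("0x%08x -> 0x%03x\n", 0x1abcdef0u, effhash(0x1abcdef0u));
	return 0;
}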
/**
* find_rcv_list - determine optimal filterlist inside device filter struct
* @can_id: pointer to CAN identifier of a given can_filter
@@ -400,10 +423,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
!(*can_id & CAN_RTR_FLAG)) {
if (*can_id & CAN_EFF_FLAG) {
if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
/* RFC: a future use-case for hash-tables? */
return &d->rx[RX_EFF];
}
if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
return &d->rx_eff[effhash(*can_id)];
} else {
if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
return &d->rx_sff[*can_id];
@@ -632,7 +653,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
return matches;
if (can_id & CAN_EFF_FLAG) {
hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) {
hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
if (r->can_id == can_id) {
deliver(skb, r);
matches++;


@@ -59,12 +59,17 @@ struct receiver {
char *ident;
};
enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
/* per device receive filters linked at dev->ml_priv */
struct dev_rcv_lists {
struct hlist_head rx[RX_MAX];
struct hlist_head rx_sff[0x800];
struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
int remove_on_zero_entries;
int entries;
};


@@ -80,7 +80,6 @@ static const char rx_list_name[][8] = {
[RX_ALL] = "rx_all",
[RX_FIL] = "rx_fil",
[RX_INV] = "rx_inv",
[RX_EFF] = "rx_eff",
};
/*
@@ -389,25 +388,26 @@ static const struct file_operations can_rcvlist_proc_fops = {
.release = single_release,
};
static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
struct net_device *dev,
struct dev_rcv_lists *d)
static inline void can_rcvlist_proc_show_array(struct seq_file *m,
struct net_device *dev,
struct hlist_head *rcv_array,
unsigned int rcv_array_sz)
{
int i;
unsigned int i;
int all_empty = 1;
/* check whether at least one list is non-empty */
for (i = 0; i < 0x800; i++)
if (!hlist_empty(&d->rx_sff[i])) {
for (i = 0; i < rcv_array_sz; i++)
if (!hlist_empty(&rcv_array[i])) {
all_empty = 0;
break;
}
if (!all_empty) {
can_print_recv_banner(m);
for (i = 0; i < 0x800; i++) {
if (!hlist_empty(&d->rx_sff[i]))
can_print_rcvlist(m, &d->rx_sff[i], dev);
for (i = 0; i < rcv_array_sz; i++) {
if (!hlist_empty(&rcv_array[i]))
can_print_rcvlist(m, &rcv_array[i], dev);
}
} else
seq_printf(m, " (%s: no entry)\n", DNAME(dev));
@@ -425,12 +425,15 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
/* sff receive list for 'all' CAN devices (dev == NULL) */
d = &can_rx_alldev_list;
can_rcvlist_sff_proc_show_one(m, NULL, d);
can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff));
/* sff receive list for registered CAN devices */
for_each_netdev_rcu(&init_net, dev) {
if (dev->type == ARPHRD_CAN && dev->ml_priv)
can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
if (dev->type == ARPHRD_CAN && dev->ml_priv) {
d = dev->ml_priv;
can_rcvlist_proc_show_array(m, dev, d->rx_sff,
ARRAY_SIZE(d->rx_sff));
}
}
rcu_read_unlock();
@@ -452,6 +455,49 @@ static const struct file_operations can_rcvlist_sff_proc_fops = {
.release = single_release,
};
static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev;
struct dev_rcv_lists *d;
/* RX_EFF */
seq_puts(m, "\nreceive list 'rx_eff':\n");
rcu_read_lock();
/* eff receive list for 'all' CAN devices (dev == NULL) */
d = &can_rx_alldev_list;
can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff));
/* eff receive list for registered CAN devices */
for_each_netdev_rcu(&init_net, dev) {
if (dev->type == ARPHRD_CAN && dev->ml_priv) {
d = dev->ml_priv;
can_rcvlist_proc_show_array(m, dev, d->rx_eff,
ARRAY_SIZE(d->rx_eff));
}
}
rcu_read_unlock();
seq_putc(m, '\n');
return 0;
}
static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, can_rcvlist_eff_proc_show, NULL);
}
static const struct file_operations can_rcvlist_eff_proc_fops = {
.owner = THIS_MODULE,
.open = can_rcvlist_eff_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* proc utility functions
*/
@@ -491,8 +537,8 @@ void can_init_proc(void)
&can_rcvlist_proc_fops, (void *)RX_FIL);
pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
&can_rcvlist_proc_fops, (void *)RX_INV);
pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
&can_rcvlist_proc_fops, (void *)RX_EFF);
pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
&can_rcvlist_eff_proc_fops);
pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
&can_rcvlist_sff_proc_fops);
}


@@ -2491,7 +2491,7 @@ EXPORT_SYMBOL(ceph_osdc_sync);
* Call all pending notify callbacks - for use after a watch is
* unregistered, to make sure no more callbacks for it will be invoked
*/
extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
flush_workqueue(osdc->notify_wq);
}


@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o
sock_diag.o dev_ioctl.o tso.o
obj-$(CONFIG_XFRM) += flow.o
obj-y += net-sysfs.o


@@ -739,11 +739,15 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
__sum16 sum;
sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
netdev_rx_csum_fault(skb->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum &&
!skb->csum_complete_sw)
netdev_rx_csum_fault(skb->dev);
/* Save checksum complete for later use */
skb->csum = sum;
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum_complete_sw = 1;
return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);


@@ -1661,6 +1661,29 @@ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
}
if (unlikely(!is_skb_forwardable(dev, skb))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
skb_scrub_packet(skb, true);
skb->protocol = eth_type_trans(skb, dev);
return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
* dev_forward_skb - loopback an skb to another netif
*
@@ -1681,24 +1704,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
*/
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
}
if (unlikely(!is_skb_forwardable(dev, skb))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
skb_scrub_packet(skb, true);
skb->protocol = eth_type_trans(skb, dev);
return netif_rx_internal(skb);
return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -2507,13 +2513,39 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
return 0;
}
/* For an MPLS offload request, verify we are testing hardware MPLS
* features instead of the netdev's standard features.
*/
#ifdef CONFIG_NET_MPLS_GSO
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{
if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
features &= skb->dev->mpls_features;
return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
{
return features;
}
#endif
static netdev_features_t harmonize_features(struct sk_buff *skb,
netdev_features_t features)
{
int tmp;
__be16 type;
type = skb_network_protocol(skb, &tmp);
features = net_mpls_features(skb, features, type);
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
!can_checksum_protocol(features, type)) {
features &= ~NETIF_F_ALL_CSUM;
} else if (illegal_highdma(skb->dev, skb)) {
features &= ~NETIF_F_SG;
@@ -5689,10 +5721,6 @@ static void rollback_registered_many(struct list_head *head)
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
/*
* Flush the unicast and multicast chains
*/
@@ -5702,6 +5730,10 @@ static void rollback_registered_many(struct list_head *head)
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
/* Notifier chain MUST detach us all upper devices. */
WARN_ON(netdev_has_any_upper_dev(dev));
@@ -5927,10 +5959,7 @@ static void netdev_init_one_queue(struct net_device *dev,
static void netif_free_tx_queues(struct net_device *dev)
{
if (is_vmalloc_addr(dev->_tx))
vfree(dev->_tx);
else
kfree(dev->_tx);
kvfree(dev->_tx);
}
static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -6404,10 +6433,7 @@ void netdev_freemem(struct net_device *dev)
{
char *addr = (char *)dev - dev->padded;
if (is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
kvfree(addr);
}
/**
@@ -6512,11 +6538,6 @@ free_all:
free_pcpu:
free_percpu(dev->pcpu_refcnt);
netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
kfree(dev->_rx);
#endif
free_dev:
netdev_freemem(dev);
return NULL;
@@ -6613,6 +6634,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
/**
* unregister_netdevice_many - unregister many devices
* @head: list of devices
*
* Note: As most callers use a stack-allocated list_head,
* we force a list_del() to make sure the stack won't be corrupted later.
*/
void unregister_netdevice_many(struct list_head *head)
{
@@ -6622,6 +6646,7 @@ void unregister_netdevice_many(struct list_head *head)
rollback_registered_many(head);
list_for_each_entry(dev, head, unreg_list)
net_set_todo(dev);
list_del(head);
}
}
EXPORT_SYMBOL(unregister_netdevice_many);
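The note above pays off in callers like the rtnl_dellink hunk further down: with the list_del() folded into unregister_netdevice_many(), the common stack-list pattern shrinks to the following.

/* Caller pattern after this change (see the rtnl_dellink hunk below);
 * the trailing list_del(&list_kill) is no longer needed.
 */
LIST_HEAD(list_kill);

ops->dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);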
@@ -7077,7 +7102,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
}
}
unregister_netdevice_many(&dev_kill_list);
list_del(&dev_kill_list);
rtnl_unlock();
}


@@ -225,6 +225,91 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
}
EXPORT_SYMBOL(__hw_addr_unsync);
/**
* __hw_addr_sync_dev - Synchronize device's multicast list
* @list: address list to synchronize
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
*
* This function is intended to be called from the ndo_set_rx_mode
* function of devices that require explicit address add/remove
* notifications. The unsync function may be NULL in which case
* the addresses requiring removal will simply be removed without
* any notification to the device.
**/
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *))
{
struct netdev_hw_addr *ha, *tmp;
int err;
/* first go through and flush out any stale entries */
list_for_each_entry_safe(ha, tmp, &list->list, list) {
if (!ha->sync_cnt || ha->refcount != 1)
continue;
/* if unsync is defined and fails defer unsyncing address */
if (unsync && unsync(dev, ha->addr))
continue;
ha->sync_cnt--;
__hw_addr_del_entry(list, ha, false, false);
}
/* go through and sync new entries to the list */
list_for_each_entry_safe(ha, tmp, &list->list, list) {
if (ha->sync_cnt)
continue;
err = sync(dev, ha->addr);
if (err)
return err;
ha->sync_cnt++;
ha->refcount++;
}
return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
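As the kernel-doc above says, this helper is meant to be driven from a driver's .ndo_set_rx_mode. A hedged sketch of such a caller follows; the mydrv_* names and hardware hooks are assumptions, not part of this patch.

/* Hypothetical driver glue around __hw_addr_sync_dev(); the mydrv_*
 * hardware hooks are illustrative assumptions only.
 */
static int mydrv_mc_sync(struct net_device *dev, const unsigned char *addr)
{
	/* program addr into the hardware multicast filter */
	return mydrv_hw_mc_add(netdev_priv(dev), addr);
}

static int mydrv_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
	/* drop addr from the hardware multicast filter */
	return mydrv_hw_mc_del(netdev_priv(dev), addr);
}

static void mydrv_set_rx_mode(struct net_device *dev)
{
	/* sync dev->mc with the hardware, adding/removing as needed */
	__hw_addr_sync_dev(&dev->mc, dev, mydrv_mc_sync, mydrv_mc_unsync);
}

The matching teardown would call __hw_addr_unsync_dev(&dev->mc, dev, mydrv_mc_unsync) from ndo_stop, per the second helper below.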
/**
* __hw_addr_unsync_dev - Remove synchronized addresses from device
* @list: address list to remove synchronized addresses from
* @dev: device to sync
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by __hw_addr_sync_dev().
* This function is intended to be called from the ndo_stop or ndo_open
* functions on devices that require explicit address add/remove
* notifications. If the unsync function pointer is NULL then this function
* can be used to just reset the sync_cnt for the addresses in the list.
**/
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
{
struct netdev_hw_addr *ha, *tmp;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
if (!ha->sync_cnt)
continue;
/* if unsync is defined and fails defer unsyncing address */
if (unsync && unsync(dev, ha->addr))
continue;
ha->sync_cnt--;
__hw_addr_del_entry(list, ha, false, false);
}
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);
static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
struct netdev_hw_addr *ha, *tmp;


@@ -557,6 +557,23 @@ err_out:
return ret;
}
static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
struct ethtool_rxnfc *rx_rings,
u32 size)
{
int i;
if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
return -EFAULT;
/* Validate ring indices */
for (i = 0; i < size; i++)
if (indir[i] >= rx_rings->data)
return -EINVAL;
return 0;
}
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
@@ -565,7 +582,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
int ret;
if (!dev->ethtool_ops->get_rxfh_indir_size ||
!dev->ethtool_ops->get_rxfh_indir)
!dev->ethtool_ops->get_rxfh)
return -EOPNOTSUPP;
dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
if (dev_size == 0)
@@ -591,7 +608,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
if (!indir)
return -ENOMEM;
ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL);
if (ret)
goto out;
@@ -613,8 +630,9 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
u32 *indir;
const struct ethtool_ops *ops = dev->ethtool_ops;
int ret;
u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir ||
if (!ops->get_rxfh_indir_size || !ops->set_rxfh ||
!ops->get_rxnfc)
return -EOPNOTSUPP;
@@ -643,31 +661,187 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
for (i = 0; i < dev_size; i++)
indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
} else {
if (copy_from_user(indir,
useraddr +
offsetof(struct ethtool_rxfh_indir,
ring_index[0]),
dev_size * sizeof(indir[0]))) {
ret = -EFAULT;
ret = ethtool_copy_validate_indir(indir,
useraddr + ringidx_offset,
&rx_rings,
dev_size);
if (ret)
goto out;
}
/* Validate ring indices */
for (i = 0; i < dev_size; i++) {
if (indir[i] >= rx_rings.data) {
ret = -EINVAL;
goto out;
}
}
}
ret = ops->set_rxfh_indir(dev, indir);
ret = ops->set_rxfh(dev, indir, NULL);
out:
kfree(indir);
return ret;
}
static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
void __user *useraddr)
{
int ret;
const struct ethtool_ops *ops = dev->ethtool_ops;
u32 user_indir_size, user_key_size;
u32 dev_indir_size = 0, dev_key_size = 0;
struct ethtool_rxfh rxfh;
u32 total_size;
u32 indir_bytes;
u32 *indir = NULL;
u8 *hkey = NULL;
u8 *rss_config;
if (!(dev->ethtool_ops->get_rxfh_indir_size ||
dev->ethtool_ops->get_rxfh_key_size) ||
!dev->ethtool_ops->get_rxfh)
return -EOPNOTSUPP;
if (ops->get_rxfh_indir_size)
dev_indir_size = ops->get_rxfh_indir_size(dev);
if (ops->get_rxfh_key_size)
dev_key_size = ops->get_rxfh_key_size(dev);
if ((dev_key_size + dev_indir_size) == 0)
return -EOPNOTSUPP;
if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
return -EFAULT;
user_indir_size = rxfh.indir_size;
user_key_size = rxfh.key_size;
/* Check that reserved fields are 0 for now */
if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
return -EINVAL;
rxfh.indir_size = dev_indir_size;
rxfh.key_size = dev_key_size;
if (copy_to_user(useraddr, &rxfh, sizeof(rxfh)))
return -EFAULT;
/* If the user buffer size is 0, this is just a query for the
* device table size and key size. Otherwise, if the user size is
* not equal to the device table size or key size, it's an error.
*/
if (!user_indir_size && !user_key_size)
return 0;
if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
(user_key_size && (user_key_size != dev_key_size)))
return -EINVAL;
indir_bytes = user_indir_size * sizeof(indir[0]);
total_size = indir_bytes + user_key_size;
rss_config = kzalloc(total_size, GFP_USER);
if (!rss_config)
return -ENOMEM;
if (user_indir_size)
indir = (u32 *)rss_config;
if (user_key_size)
hkey = rss_config + indir_bytes;
ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey);
if (!ret) {
if (copy_to_user(useraddr +
offsetof(struct ethtool_rxfh, rss_config[0]),
rss_config, total_size))
ret = -EFAULT;
}
kfree(rss_config);
return ret;
}
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
void __user *useraddr)
{
int ret;
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
u32 dev_indir_size = 0, dev_key_size = 0, i;
u32 *indir = NULL, indir_bytes = 0;
u8 *hkey = NULL;
u8 *rss_config;
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) ||
!ops->get_rxnfc || !ops->set_rxfh)
return -EOPNOTSUPP;
if (ops->get_rxfh_indir_size)
dev_indir_size = ops->get_rxfh_indir_size(dev);
if (ops->get_rxfh_key_size)
dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
if ((dev_key_size + dev_indir_size) == 0)
return -EOPNOTSUPP;
if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
return -EFAULT;
/* Check that reserved fields are 0 for now */
if (rxfh.rss_context || rxfh.rsvd[0] || rxfh.rsvd[1])
return -EINVAL;
/* If either indir or hash key is valid, proceed further.
* It is not valid to request that both be unchanged.
*/
if ((rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.indir_size != dev_indir_size) ||
(rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
(rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
rxfh.key_size == 0))
return -EINVAL;
if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
indir_bytes = dev_indir_size * sizeof(indir[0]);
rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
if (!rss_config)
return -ENOMEM;
rx_rings.cmd = ETHTOOL_GRXRINGS;
ret = ops->get_rxnfc(dev, &rx_rings, NULL);
if (ret)
goto out;
/* rxfh.indir_size == 0 means reset the indir table to default.
* rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged.
*/
if (rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
indir = (u32 *)rss_config;
ret = ethtool_copy_validate_indir(indir,
useraddr + rss_cfg_offset,
&rx_rings,
rxfh.indir_size);
if (ret)
goto out;
} else if (rxfh.indir_size == 0) {
indir = (u32 *)rss_config;
for (i = 0; i < dev_indir_size; i++)
indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
}
if (rxfh.key_size) {
hkey = rss_config + indir_bytes;
if (copy_from_user(hkey,
useraddr + rss_cfg_offset + indir_bytes,
rxfh.key_size)) {
ret = -EFAULT;
goto out;
}
}
ret = ops->set_rxfh(dev, indir, hkey);
out:
kfree(rss_config);
return ret;
}
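From userspace the new ETHTOOL_GRSSH path is a two-step exchange: a zero-size call queries the device's table and key sizes, then a second call with exactly those sizes fetches the data. A hedged sketch follows; the struct field names match the ethtool_rxfh layout this patch validates, and error handling is minimal.

/* Hedged userspace sketch of the ETHTOOL_GRSSH two-step query the
 * kernel code above implements; illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int rssh_query(int fd, struct ifreq *ifr)
{
	struct ethtool_rxfh head = { .cmd = ETHTOOL_GRSSH };
	struct ethtool_rxfh *rxfh;
	size_t sz;

	/* step 1: zero sizes -> kernel fills in the device sizes */
	ifr->ifr_data = (char *)&head;
	if (ioctl(fd, SIOCETHTOOL, ifr) < 0)
		return -1;

	/* step 2: allocate a matching buffer and fetch table + key */
	sz = sizeof(*rxfh) + head.indir_size * sizeof(__u32) + head.key_size;
	rxfh = calloc(1, sz);
	if (!rxfh)
		return -1;
	rxfh->cmd = ETHTOOL_GRSSH;
	rxfh->indir_size = head.indir_size;
	rxfh->key_size = head.key_size;
	ifr->ifr_data = (char *)rxfh;
	if (ioctl(fd, SIOCETHTOOL, ifr) < 0) {
		free(rxfh);
		return -1;
	}
	printf("indir entries: %u, key bytes: %u\n",
	       rxfh->indir_size, rxfh->key_size);
	free(rxfh);
	return 0;
}

int main(int argc, char **argv)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || argc < 2)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	return rssh_query(fd, &ifr) ? 1 : 0;
}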
static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
{
struct ethtool_regs regs;
@@ -1491,6 +1665,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
case ETHTOOL_GRXFHINDIR:
case ETHTOOL_GRSSH:
case ETHTOOL_GFEATURES:
case ETHTOOL_GCHANNELS:
case ETHTOOL_GET_TS_INFO:
@@ -1628,6 +1803,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SRXFHINDIR:
rc = ethtool_set_rxfh_indir(dev, useraddr);
break;
case ETHTOOL_GRSSH:
rc = ethtool_get_rxfh(dev, useraddr);
break;
case ETHTOOL_SRSSH:
rc = ethtool_set_rxfh(dev, useraddr);
break;
case ETHTOOL_GFEATURES:
rc = ethtool_get_features(dev, useraddr);
break;

File diff suppressed because it is too large


@@ -273,7 +273,7 @@ static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp;
LIST_HEAD(net_kill_list);
struct list_head net_kill_list;
LIST_HEAD(net_exit_list);
/* Atomically snapshot the list of namespaces to cleanup */


@@ -573,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
is_zero_ether_addr(pkt_dev->src_mac) ?
pkt_dev->odev->dev_addr : pkt_dev->src_mac);
seq_printf(seq, "dst_mac: ");
seq_puts(seq, "dst_mac: ");
seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
seq_printf(seq,
@@ -588,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->nr_labels) {
unsigned int i;
seq_printf(seq, " mpls: ");
seq_puts(seq, " mpls: ");
for (i = 0; i < pkt_dev->nr_labels; i++)
seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
i == pkt_dev->nr_labels-1 ? "\n" : ", ");
@@ -613,67 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->node >= 0)
seq_printf(seq, " node: %d\n", pkt_dev->node);
seq_printf(seq, " Flags: ");
seq_puts(seq, " Flags: ");
if (pkt_dev->flags & F_IPV6)
seq_printf(seq, "IPV6 ");
seq_puts(seq, "IPV6 ");
if (pkt_dev->flags & F_IPSRC_RND)
seq_printf(seq, "IPSRC_RND ");
seq_puts(seq, "IPSRC_RND ");
if (pkt_dev->flags & F_IPDST_RND)
seq_printf(seq, "IPDST_RND ");
seq_puts(seq, "IPDST_RND ");
if (pkt_dev->flags & F_TXSIZE_RND)
seq_printf(seq, "TXSIZE_RND ");
seq_puts(seq, "TXSIZE_RND ");
if (pkt_dev->flags & F_UDPSRC_RND)
seq_printf(seq, "UDPSRC_RND ");
seq_puts(seq, "UDPSRC_RND ");
if (pkt_dev->flags & F_UDPDST_RND)
seq_printf(seq, "UDPDST_RND ");
seq_puts(seq, "UDPDST_RND ");
if (pkt_dev->flags & F_UDPCSUM)
seq_printf(seq, "UDPCSUM ");
seq_puts(seq, "UDPCSUM ");
if (pkt_dev->flags & F_MPLS_RND)
seq_printf(seq, "MPLS_RND ");
seq_puts(seq, "MPLS_RND ");
if (pkt_dev->flags & F_QUEUE_MAP_RND)
seq_printf(seq, "QUEUE_MAP_RND ");
seq_puts(seq, "QUEUE_MAP_RND ");
if (pkt_dev->flags & F_QUEUE_MAP_CPU)
seq_printf(seq, "QUEUE_MAP_CPU ");
seq_puts(seq, "QUEUE_MAP_CPU ");
if (pkt_dev->cflows) {
if (pkt_dev->flags & F_FLOW_SEQ)
seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
seq_puts(seq, "FLOW_SEQ "); /*in sequence flows*/
else
seq_printf(seq, "FLOW_RND ");
seq_puts(seq, "FLOW_RND ");
}
#ifdef CONFIG_XFRM
if (pkt_dev->flags & F_IPSEC_ON) {
seq_printf(seq, "IPSEC ");
seq_puts(seq, "IPSEC ");
if (pkt_dev->spi)
seq_printf(seq, "spi:%u", pkt_dev->spi);
}
#endif
if (pkt_dev->flags & F_MACSRC_RND)
seq_printf(seq, "MACSRC_RND ");
seq_puts(seq, "MACSRC_RND ");
if (pkt_dev->flags & F_MACDST_RND)
seq_printf(seq, "MACDST_RND ");
seq_puts(seq, "MACDST_RND ");
if (pkt_dev->flags & F_VID_RND)
seq_printf(seq, "VID_RND ");
seq_puts(seq, "VID_RND ");
if (pkt_dev->flags & F_SVID_RND)
seq_printf(seq, "SVID_RND ");
seq_puts(seq, "SVID_RND ");
if (pkt_dev->flags & F_NODE)
seq_printf(seq, "NODE_ALLOC ");
seq_puts(seq, "NODE_ALLOC ");
seq_puts(seq, "\n");
@@ -716,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->result[0])
seq_printf(seq, "Result: %s\n", pkt_dev->result);
else
seq_printf(seq, "Result: Idle\n");
seq_puts(seq, "Result: Idle\n");
return 0;
}
@@ -1735,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
BUG_ON(!t);
seq_printf(seq, "Running: ");
seq_puts(seq, "Running: ");
if_lock(t);
list_for_each_entry(pkt_dev, &t->if_list, list)
if (pkt_dev->running)
seq_printf(seq, "%s ", pkt_dev->odevname);
seq_printf(seq, "\nStopped: ");
seq_puts(seq, "\nStopped: ");
list_for_each_entry(pkt_dev, &t->if_list, list)
if (!pkt_dev->running)
@@ -1751,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
if (t->result[0])
seq_printf(seq, "\nResult: %s\n", t->result);
else
seq_printf(seq, "\nResult: NA\n");
seq_puts(seq, "\nResult: NA\n");
if_unlock(t);


@@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ptp_classify_raw);
void __init ptp_classifier_init(void)
{
static struct sock_filter ptp_filter[] = {
static struct sock_filter ptp_filter[] __initdata = {
{ 0x28, 0, 0, 0x0000000c },
{ 0x15, 0, 12, 0x00000800 },
{ 0x30, 0, 0, 0x00000017 },
@@ -133,7 +133,7 @@ void __init ptp_classifier_init(void)
{ 0x16, 0, 0, 0x00000000 },
{ 0x06, 0, 0, 0x00000000 },
};
struct sock_fprog ptp_prog = {
struct sock_fprog_kern ptp_prog = {
.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
};


@@ -798,8 +798,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
size += num_vfs *
(nla_total_size(sizeof(struct ifla_vf_mac)) +
nla_total_size(sizeof(struct ifla_vf_vlan)) +
nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)));
nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
nla_total_size(sizeof(struct ifla_vf_rate)));
return size;
} else
return 0;
@@ -1065,6 +1065,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct ifla_vf_info ivi;
struct ifla_vf_mac vf_mac;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_rate vf_rate;
struct ifla_vf_tx_rate vf_tx_rate;
struct ifla_vf_spoofchk vf_spoofchk;
struct ifla_vf_link_state vf_linkstate;
@@ -1085,6 +1086,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
break;
vf_mac.vf =
vf_vlan.vf =
vf_rate.vf =
vf_tx_rate.vf =
vf_spoofchk.vf =
vf_linkstate.vf = ivi.vf;
@@ -1092,7 +1094,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
vf_tx_rate.rate = ivi.tx_rate;
vf_tx_rate.rate = ivi.max_tx_rate;
vf_rate.min_tx_rate = ivi.min_tx_rate;
vf_rate.max_tx_rate = ivi.max_tx_rate;
vf_spoofchk.setting = ivi.spoofchk;
vf_linkstate.link_state = ivi.linkstate;
vf = nla_nest_start(skb, IFLA_VF_INFO);
@@ -1102,6 +1106,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
}
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
&vf_rate) ||
nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
&vf_tx_rate) ||
nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
@@ -1208,6 +1214,10 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
.len = sizeof(struct ifla_vf_tx_rate) },
[IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_spoofchk) },
[IFLA_VF_RATE] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_rate) },
[IFLA_VF_LINK_STATE] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_link_state) },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
@@ -1234,6 +1244,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
int err;
int hdrlen;
s_h = cb->args[0];
s_idx = cb->args[1];
@@ -1241,8 +1252,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->dev_base_seq;
if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
/* A hack to preserve kernel<->userspace interface.
* The correct header is ifinfomsg. It is consistent with rtnl_getlink.
* However, before Linux v3.9 the code here assumed rtgenmsg and that's
* what iproute2 < v3.9.0 used.
* We can detect the old iproute2. Even including the IFLA_EXT_MASK
* attribute, its netlink message is shorter than struct ifinfomsg.
*/
hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
@@ -1367,11 +1387,29 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
}
case IFLA_VF_TX_RATE: {
struct ifla_vf_tx_rate *ivt;
struct ifla_vf_info ivf;
ivt = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_tx_rate)
err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
ivt->rate);
if (ops->ndo_get_vf_config)
err = ops->ndo_get_vf_config(dev, ivt->vf,
&ivf);
if (err)
break;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_rate)
err = ops->ndo_set_vf_rate(dev, ivt->vf,
ivf.min_tx_rate,
ivt->rate);
break;
}
case IFLA_VF_RATE: {
struct ifla_vf_rate *ivt;
ivt = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_rate)
err = ops->ndo_set_vf_rate(dev, ivt->vf,
ivt->min_tx_rate,
ivt->max_tx_rate);
break;
}
case IFLA_VF_SPOOFCHK: {
@@ -1744,7 +1782,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
ops->dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
list_del(&list_kill);
return 0;
}
@@ -2019,11 +2056,15 @@ replay:
if (ops->newlink) {
err = ops->newlink(net, dev, tb, data);
/* Drivers should call free_netdev() in ->destructor
* and unregister it on failure so that device could be
* finally freed in rtnl_unlock.
* and unregister it on failure after registration
* so that device could be finally freed in rtnl_unlock.
*/
if (err < 0)
if (err < 0) {
/* If device is not registered at all, free it now */
if (dev->reg_state == NETREG_UNINITIALIZED)
free_netdev(dev);
goto out;
}
} else {
err = register_netdevice(dev);
if (err < 0) {
@@ -2095,9 +2136,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
u16 min_ifinfo_dump_size = 0;
int hdrlen;
if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
}


@@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
#endif
#ifdef CONFIG_INET
__u32 secure_ip_id(__be32 daddr)
{
u32 hash[MD5_DIGEST_WORDS];
net_secret_init();
hash[0] = (__force __u32) daddr;
hash[1] = net_secret[13];
hash[2] = net_secret[14];
hash[3] = net_secret[15];
md5_transform(hash, net_secret);
return hash[0];
}
__u32 secure_ipv6_id(const __be32 daddr[4])
{
__u32 hash[4];
net_secret_init();
memcpy(hash, daddr, 16);
md5_transform(hash, net_secret);
return hash[0];
}
__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport)


@@ -694,7 +694,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif
memcpy(new->cb, old->cb, sizeof(old->cb));
new->csum = old->csum;
new->local_df = old->local_df;
new->ignore_df = old->ignore_df;
new->pkt_type = old->pkt_type;
new->ip_summed = old->ip_summed;
skb_copy_queue_mapping(new, old);
@@ -951,10 +951,13 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
EXPORT_SYMBOL(skb_copy);
/**
* __pskb_copy - create copy of an sk_buff with private head.
* __pskb_copy_fclone - create copy of an sk_buff with private head.
* @skb: buffer to copy
* @headroom: headroom of new skb
* @gfp_mask: allocation priority
* @fclone: if true allocate the copy of the skb from the fclone
* cache instead of the head cache; it is recommended to set this
* to true for the cases where the copy will likely be cloned
*
* Make a copy of both an &sk_buff and part of its data, located
* in header. Fragmented data remain shared. This is used when
@@ -964,11 +967,12 @@ EXPORT_SYMBOL(skb_copy);
* The returned buffer has a reference count of 1.
*/
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
gfp_t gfp_mask, bool fclone)
{
unsigned int size = skb_headlen(skb) + headroom;
struct sk_buff *n = __alloc_skb(size, gfp_mask,
skb_alloc_rx_flag(skb), NUMA_NO_NODE);
int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
if (!n)
goto out;
@@ -1008,7 +1012,7 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
out:
return n;
}
EXPORT_SYMBOL(__pskb_copy);
EXPORT_SYMBOL(__pskb_copy_fclone);
/**
* pskb_expand_head - reallocate header of &sk_buff
@@ -2881,12 +2885,14 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
int pos;
int dummy;
__skb_push(head_skb, doffset);
proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
csum = !!can_checksum_protocol(features, proto);
__skb_push(head_skb, doffset);
csum = !head_skb->encap_hdr_csum &&
!!can_checksum_protocol(features, proto);
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
@@ -2983,6 +2989,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
len, 0);
SKB_GSO_CB(nskb)->csum_start =
skb_headroom(nskb) + offset;
continue;
}
@@ -3052,6 +3060,8 @@ perform_csum_check:
nskb->csum = skb_checksum(nskb, doffset,
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
SKB_GSO_CB(nskb)->csum_start =
skb_headroom(nskb) + doffset;
}
} while ((offset += len) < head_skb->len);
@@ -3913,7 +3923,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->local_df = 0;
skb->ignore_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
secpath_reset(skb);


@@ -784,7 +784,7 @@ set_rcvbuf:
break;
case SO_NO_CHECK:
sk->sk_no_check = valbool;
sk->sk_no_check_tx = valbool;
break;
case SO_PRIORITY:
@@ -1064,7 +1064,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_NO_CHECK:
v.val = sk->sk_no_check;
v.val = sk->sk_no_check_tx;
break;
case SO_PRIORITY:

net/core/tso.c (new file, 77 lines)

@@ -0,0 +1,77 @@
#include <linux/export.h>
#include <net/ip.h>
#include <net/tso.h>
/* Calculate expected number of TX descriptors */
int tso_count_descs(struct sk_buff *skb)
{
/* The Marvell Way */
return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
}
EXPORT_SYMBOL(tso_count_descs);
void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
int size, bool is_last)
{
struct iphdr *iph;
struct tcphdr *tcph;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int mac_hdr_len = skb_network_offset(skb);
memcpy(hdr, skb->data, hdr_len);
iph = (struct iphdr *)(hdr + mac_hdr_len);
iph->id = htons(tso->ip_id);
iph->tot_len = htons(size + hdr_len - mac_hdr_len);
tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
tcph->seq = htonl(tso->tcp_seq);
tso->ip_id++;
if (!is_last) {
/* Clear all special flags for all but the last packet */
tcph->psh = 0;
tcph->fin = 0;
tcph->rst = 0;
}
}
EXPORT_SYMBOL(tso_build_hdr);
void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
{
tso->tcp_seq += size;
tso->size -= size;
tso->data += size;
if ((tso->size == 0) &&
(tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
/* Move to next segment */
tso->size = frag->size;
tso->data = page_address(frag->page.p) + frag->page_offset;
tso->next_frag_idx++;
}
}
EXPORT_SYMBOL(tso_build_data);
void tso_start(struct sk_buff *skb, struct tso_t *tso)
{
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
tso->ip_id = ntohs(ip_hdr(skb)->id);
tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
tso->next_frag_idx = 0;
/* Build first data */
tso->size = skb_headlen(skb) - hdr_len;
tso->data = skb->data + hdr_len;
if ((tso->size == 0) &&
(tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
/* Move to next segment */
tso->size = frag->size;
tso->data = page_address(frag->page.p) + frag->page_offset;
tso->next_frag_idx++;
}
}
EXPORT_SYMBOL(tso_start);
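Per the merge summary these helpers back the software-TSO paths in mvneta and mv643xx_eth; a hedged sketch of how a driver xmit path would string them together, where the mydrv_* descriptor calls and MYDRV_MAX_HDR are hypothetical stand-ins:

/* Hedged sketch of a driver TX loop over the tso_* helpers above;
 * mydrv_put_hdr_desc()/mydrv_put_data_desc() and MYDRV_MAX_HDR are
 * illustrative, not from this patch.
 */
#include <net/tso.h>

static int mydrv_xmit_tso(struct mydrv_port *pp, struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	struct tso_t tso;

	tso_start(skb, &tso);

	while (total_len > 0) {
		char hdr[MYDRV_MAX_HDR];
		int data_left = min_t(int, skb_shinfo(skb)->gso_size,
				      total_len);

		total_len -= data_left;

		/* per-segment L2/L3/L4 header with fixed-up id/seq/flags */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		mydrv_put_hdr_desc(pp, hdr, hdr_len);

		/* payload may straddle the linear area and page frags */
		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			mydrv_put_data_desc(pp, tso.data, size);
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	return 0;
}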


@@ -1024,7 +1024,6 @@ static struct inet_protosw dccp_v4_protosw = {
.protocol = IPPROTO_DCCP,
.prot = &dccp_v4_prot,
.ops = &inet_dccp_ops,
.no_check = 0,
.flags = INET_PROTOSW_ICSK,
};


@@ -1084,14 +1084,15 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int dccp_mib_init(void)
{
return snmp_mib_init((void __percpu **)dccp_statistics,
sizeof(struct dccp_mib),
__alignof__(struct dccp_mib));
dccp_statistics = alloc_percpu(struct dccp_mib);
if (!dccp_statistics)
return -ENOMEM;
return 0;
}
static inline void dccp_mib_exit(void)
{
snmp_mib_free((void __percpu **)dccp_statistics);
free_percpu(dccp_statistics);
}
static int thash_entries;


@@ -20,6 +20,7 @@
/* Boundary values */
static int zero = 0,
one = 1,
u8_max = 0xFF;
static unsigned long seqw_min = DCCPF_SEQ_WMIN,
seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
@@ -58,7 +59,7 @@ static struct ctl_table dccp_default_table[] = {
.maxlen = sizeof(sysctl_dccp_request_retries),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra1 = &one,
.extra2 = &u8_max,
},
{


@@ -280,7 +280,7 @@ static ktime_t dccp_timestamp_seed;
*/
u32 dccp_timestamp(void)
{
s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
do_div(delta, 10);
return delta;


@@ -481,7 +481,7 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
sk->sk_destruct = dn_destruct;
sk->sk_no_check = 1;
sk->sk_no_check_tx = 1;
sk->sk_family = PF_DECnet;
sk->sk_protocol = 0;
sk->sk_allocation = gfp;


@@ -93,8 +93,8 @@ int dns_query(const char *type, const char *name, size_t namelen,
}
if (!namelen)
namelen = strlen(name);
if (namelen < 3)
namelen = strnlen(name, 256);
if (namelen < 3 || namelen > 255)
return -EINVAL;
desclen += namelen + 1;
@@ -149,7 +149,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
if (!*_result)
goto put;
memcpy(*_result, upayload->data, len + 1);
memcpy(*_result, upayload->data, len);
*_result[len] = '\0';
if (_expiry)
*_expiry = rkey->expiry;


@@ -346,7 +346,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
return slave_dev;
slave_dev->features = master->vlan_features;
SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
slave_dev->tx_queue_len = 0;


@@ -92,6 +92,7 @@ static int lowpan_header_create(struct sk_buff *skb,
const u8 *saddr = _saddr;
const u8 *daddr = _daddr;
struct ieee802154_addr sa, da;
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
/* TODO:
* if this packet isn't an IPv6 one, where should it be routed?
@@ -115,8 +116,7 @@ static int lowpan_header_create(struct sk_buff *skb,
* from MAC subif of the 'dev' and 'real_dev' network devices, but
* this isn't implemented in mainline yet, so currently we assign 0xff
*/
mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
cb->type = IEEE802154_FC_TYPE_DATA;
/* prepare wpan address data */
sa.mode = IEEE802154_ADDR_LONG;
@@ -135,11 +135,10 @@ static int lowpan_header_create(struct sk_buff *skb,
} else {
da.mode = IEEE802154_ADDR_LONG;
da.extended_addr = ieee802154_devaddr_from_raw(daddr);
/* request acknowledgment */
mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
}
cb->ackreq = !lowpan_is_addr_broadcast(daddr);
return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
type, (void *)&da, (void *)&sa, 0);
}
@@ -221,139 +220,149 @@ static int lowpan_set_address(struct net_device *dev, void *p)
return 0;
}
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
const struct ieee802154_hdr *master_hdr)
{
struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
struct sk_buff *frag;
int rc;
frag = alloc_skb(real_dev->hard_header_len +
real_dev->needed_tailroom + size,
GFP_ATOMIC);
if (likely(frag)) {
frag->dev = real_dev;
frag->priority = skb->priority;
skb_reserve(frag, real_dev->hard_header_len);
skb_reset_network_header(frag);
*mac_cb(frag) = *mac_cb(skb);
rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
&master_hdr->source, size);
if (rc < 0) {
kfree_skb(frag);
return ERR_PTR(-rc);
}
} else {
frag = ERR_PTR(ENOMEM);
}
return frag;
}
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
int mlen, int plen, int offset, int type)
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
u8 *frag_hdr, int frag_hdrlen,
int offset, int len)
{
struct sk_buff *frag;
int hlen;
hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
if (IS_ERR(frag))
return -PTR_ERR(frag);
frag = netdev_alloc_skb(skb->dev,
hlen + mlen + plen + IEEE802154_MFR_SIZE);
if (!frag)
return -ENOMEM;
memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
frag->priority = skb->priority;
/* copy header, MFR and payload */
skb_put(frag, mlen);
skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
skb_put(frag, hlen);
skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
skb_put(frag, plen);
skb_copy_to_linear_data_offset(frag, mlen + hlen,
skb_network_header(skb) + offset, plen);
raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
return dev_queue_xmit(frag);
}
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
const struct ieee802154_hdr *wpan_hdr)
{
int err;
u16 dgram_offset, dgram_size, payload_length, header_length,
lowpan_size, frag_plen, offset;
__be16 tag;
u8 head[5];
u16 dgram_size, dgram_offset;
__be16 frag_tag;
u8 frag_hdr[5];
int frag_cap, frag_len, payload_cap, rc;
int skb_unprocessed, skb_offset;
header_length = skb->mac_len;
payload_length = skb->len - header_length;
tag = lowpan_dev_info(dev)->fragment_tag++;
lowpan_size = skb_network_header_len(skb);
dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
header_length;
skb->mac_len;
frag_tag = lowpan_dev_info(dev)->fragment_tag++;
/* first fragment header */
head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
head[1] = dgram_size & 0xff;
memcpy(head + 2, &tag, sizeof(tag));
frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
frag_hdr[1] = dgram_size & 0xff;
memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
/* calc the nearest payload length(divided to 8) for first fragment
* which fits into a IEEE802154_MTU
*/
frag_plen = round_down(IEEE802154_MTU - header_length -
LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
IEEE802154_MFR_SIZE, 8);
payload_cap = ieee802154_max_payload(wpan_hdr);
err = lowpan_fragment_xmit(skb, head, header_length,
frag_plen + lowpan_size, 0,
LOWPAN_DISPATCH_FRAG1);
if (err) {
frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
skb_network_header_len(skb), 8);
skb_offset = skb_network_header_len(skb);
skb_unprocessed = skb->len - skb->mac_len - skb_offset;
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAG1_HEAD_SIZE, 0,
frag_len + skb_network_header_len(skb));
if (rc) {
pr_debug("%s unable to send FRAG1 packet (tag: %d)",
__func__, tag);
goto exit;
__func__, frag_tag);
goto err;
}
offset = lowpan_size + frag_plen;
dgram_offset += frag_plen;
frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
/* next fragment header */
head[0] &= ~LOWPAN_DISPATCH_FRAG1;
head[0] |= LOWPAN_DISPATCH_FRAGN;
do {
dgram_offset += frag_len;
skb_offset += frag_len;
skb_unprocessed -= frag_len;
frag_len = min(frag_cap, skb_unprocessed);
frag_plen = round_down(IEEE802154_MTU - header_length -
LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);
frag_hdr[4] = dgram_offset >> 3;
while (payload_length - offset > 0) {
int len = frag_plen;
head[4] = dgram_offset >> 3;
if (payload_length - offset < len)
len = payload_length - offset;
err = lowpan_fragment_xmit(skb, head, header_length, len,
offset, LOWPAN_DISPATCH_FRAGN);
if (err) {
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
frag_len);
if (rc) {
pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
__func__, tag, offset);
goto exit;
__func__, frag_tag, skb_offset);
goto err;
}
} while (skb_unprocessed > frag_cap);
offset += len;
dgram_offset += len;
}
consume_skb(skb);
return NET_XMIT_SUCCESS;
exit:
return err;
err:
kfree_skb(skb);
return rc;
}
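The 4/5-byte FRAG1/FRAGN headers built in frag_hdr[] above follow RFC 4944. A standalone sketch of the FRAGN encoding; the 0xe0 dispatch value comes from the RFC and is an assumption here, since the hunk only shows the LOWPAN_DISPATCH_FRAGN symbol:

/* Standalone sketch of the RFC 4944 FRAGN header assembled above;
 * 0xe0 (FRAGN dispatch) is assumed from the RFC, not from this hunk.
 */
#include <stdint.h>
#include <string.h>

static void fragn_hdr(uint8_t hdr[5], uint16_t dgram_size,
		      uint16_t frag_tag, uint16_t dgram_offset)
{
	hdr[0] = 0xe0 | ((dgram_size >> 8) & 0x07); /* dispatch + size high */
	hdr[1] = dgram_size & 0xff;                 /* datagram size low */
	memcpy(hdr + 2, &frag_tag, sizeof(frag_tag)); /* datagram tag */
	hdr[4] = dgram_offset >> 3;                 /* offset, 8-byte units */
}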
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
int err = -1;
struct ieee802154_hdr wpan_hdr;
int max_single;
pr_debug("package xmit\n");
skb->dev = lowpan_dev_info(dev)->real_dev;
if (skb->dev == NULL) {
pr_debug("ERROR: no real wpan device found\n");
goto error;
if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
kfree_skb(skb);
return NET_XMIT_DROP;
}
/* Send directly if less than the MTU minus the 2 checksum bytes. */
if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
err = dev_queue_xmit(skb);
goto out;
max_single = ieee802154_max_payload(&wpan_hdr);
if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
skb->dev = lowpan_dev_info(dev)->real_dev;
return dev_queue_xmit(skb);
} else {
netdev_tx_t rc;
pr_debug("frame is too big, fragmentation is needed\n");
rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
return rc < 0 ? NET_XMIT_DROP : rc;
}
pr_debug("frame is too big, fragmentation is needed\n");
err = lowpan_skb_fragmentation(skb, dev);
error:
dev_kfree_skb(skb);
out:
if (err)
pr_debug("ERROR: xmit failed\n");
return (err < 0) ? NET_XMIT_DROP : err;
}
static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)


@@ -21,6 +21,7 @@
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
*/
#include <linux/capability.h>
#include <linux/net.h>
#include <linux/module.h>
#include <linux/if_arp.h>
@@ -45,7 +46,12 @@ struct dgram_sock {
struct ieee802154_addr dst_addr;
unsigned int bound:1;
unsigned int connected:1;
unsigned int want_ack:1;
unsigned int secen:1;
unsigned int secen_override:1;
unsigned int seclevel:3;
unsigned int seclevel_override:1;
};
static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -73,10 +79,7 @@ static int dgram_init(struct sock *sk)
{
struct dgram_sock *ro = dgram_sk(sk);
ro->dst_addr.mode = IEEE802154_ADDR_LONG;
ro->dst_addr.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
ro->want_ack = 1;
memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
return 0;
}
@@ -183,6 +186,7 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
}
ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
ro->connected = 1;
out:
release_sock(sk);
@@ -194,10 +198,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
struct dgram_sock *ro = dgram_sk(sk);
lock_sock(sk);
ro->dst_addr.mode = IEEE802154_ADDR_LONG;
memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
ro->connected = 0;
release_sock(sk);
return 0;
@@ -209,7 +210,9 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
struct net_device *dev;
unsigned int mtu;
struct sk_buff *skb;
struct ieee802154_mac_cb *cb;
struct dgram_sock *ro = dgram_sk(sk);
struct ieee802154_addr dst_addr;
int hlen, tlen;
int err;
@@ -218,6 +221,11 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
return -EOPNOTSUPP;
}
if (!ro->connected && !msg->msg_name)
return -EDESTADDRREQ;
else if (ro->connected && msg->msg_name)
return -EISCONN;
if (!ro->bound)
dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
else
@@ -249,18 +257,28 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
skb_reset_network_header(skb);
mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
if (ro->want_ack)
mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
cb = mac_cb_init(skb);
cb->type = IEEE802154_FC_TYPE_DATA;
cb->ackreq = ro->want_ack;
mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr,
ro->bound ? &ro->src_addr : NULL, size);
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
} else {
dst_addr = ro->dst_addr;
}
cb->secen = ro->secen;
cb->secen_override = ro->secen_override;
cb->seclevel = ro->seclevel;
cb->seclevel_override = ro->seclevel_override;
err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
ro->bound ? &ro->src_addr : NULL, size);
if (err < 0)
goto out_skb;
skb_reset_mac_header(skb);
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err < 0)
goto out_skb;
@@ -419,6 +437,20 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
case WPAN_WANTACK:
val = ro->want_ack;
break;
case WPAN_SECURITY:
if (!ro->secen_override)
val = WPAN_SECURITY_DEFAULT;
else if (ro->secen)
val = WPAN_SECURITY_ON;
else
val = WPAN_SECURITY_OFF;
break;
case WPAN_SECURITY_LEVEL:
if (!ro->seclevel_override)
val = WPAN_SECURITY_LEVEL_DEFAULT;
else
val = ro->seclevel;
break;
default:
return -ENOPROTOOPT;
}
@@ -434,6 +466,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct dgram_sock *ro = dgram_sk(sk);
struct net *net = sock_net(sk);
int val;
int err = 0;
@@ -449,6 +482,47 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
case WPAN_WANTACK:
ro->want_ack = !!val;
break;
case WPAN_SECURITY:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
!ns_capable(net->user_ns, CAP_NET_RAW)) {
err = -EPERM;
break;
}
switch (val) {
case WPAN_SECURITY_DEFAULT:
ro->secen_override = 0;
break;
case WPAN_SECURITY_ON:
ro->secen_override = 1;
ro->secen = 1;
break;
case WPAN_SECURITY_OFF:
ro->secen_override = 1;
ro->secen = 0;
break;
default:
err = -EINVAL;
break;
}
break;
case WPAN_SECURITY_LEVEL:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
!ns_capable(net->user_ns, CAP_NET_RAW)) {
err = -EPERM;
break;
}
if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
err = -EINVAL;
} else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
ro->seclevel_override = 0;
} else {
ro->seclevel_override = 1;
ro->seclevel = val;
}
break;
default:
err = -ENOPROTOOPT;
break;
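A usage sketch for the new socket options added above (the constant values are assumptions mirroring include/net/af_ieee802154.h in this tree; verify against your headers before relying on them):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IEEE802154
#define AF_IEEE802154	36
#endif
/* Assumed to match include/net/af_ieee802154.h at this merge. */
#define SOL_IEEE802154	 0
#define WPAN_SECURITY	 1
#define WPAN_SECURITY_ON 2

int main(void)
{
	int fd = socket(AF_IEEE802154, SOCK_DGRAM, 0);
	int val = WPAN_SECURITY_ON;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Needs CAP_NET_ADMIN or CAP_NET_RAW, as enforced above. */
	if (setsockopt(fd, SOL_IEEE802154, WPAN_SECURITY,
		       &val, sizeof(val)) < 0)
		perror("setsockopt(WPAN_SECURITY)");
	close(fd);
	return 0;
}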

View File

@@ -195,15 +195,16 @@ ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr)
return pos;
}
static int ieee802154_sechdr_lengths[4] = {
[IEEE802154_SCF_KEY_IMPLICIT] = 5,
[IEEE802154_SCF_KEY_INDEX] = 6,
[IEEE802154_SCF_KEY_SHORT_INDEX] = 10,
[IEEE802154_SCF_KEY_HW_INDEX] = 14,
};
static int ieee802154_hdr_sechdr_len(u8 sc)
{
switch (IEEE802154_SCF_KEY_ID_MODE(sc)) {
case IEEE802154_SCF_KEY_IMPLICIT: return 5;
case IEEE802154_SCF_KEY_INDEX: return 6;
case IEEE802154_SCF_KEY_SHORT_INDEX: return 10;
case IEEE802154_SCF_KEY_HW_INDEX: return 14;
default: return -EINVAL;
}
return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)];
}
static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr)
@@ -285,3 +286,40 @@ ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
return pos;
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs);
int
ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
const u8 *buf = skb_mac_header(skb);
int pos;
pos = ieee802154_hdr_peek_addrs(skb, hdr);
if (pos < 0)
return -EINVAL;
if (hdr->fc.security_enabled) {
u8 key_id_mode = IEEE802154_SCF_KEY_ID_MODE(*(buf + pos));
int want = pos + ieee802154_sechdr_lengths[key_id_mode];
if (buf + want > skb_tail_pointer(skb))
return -EINVAL;
pos += ieee802154_hdr_get_sechdr(buf + pos, &hdr->sec);
}
return pos;
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_peek);
int ieee802154_max_payload(const struct ieee802154_hdr *hdr)
{
int hlen = ieee802154_hdr_minlen(hdr);
if (hdr->fc.security_enabled) {
hlen += ieee802154_sechdr_lengths[hdr->sec.key_id_mode] - 1;
hlen += ieee802154_sechdr_authtag_len(&hdr->sec);
}
return IEEE802154_MTU - hlen - IEEE802154_MFR_SIZE;
}
EXPORT_SYMBOL_GPL(ieee802154_max_payload);
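The payload budget follows directly: header length plus security overhead, subtracted from the 127-byte PHY frame less the 2-byte FCS. A simplified standalone sketch (the 11-byte MHR is an assumed example; the kernel's exact accounting lives in ieee802154_hdr_minlen() and ieee802154_max_payload() above):

#include <stdio.h>

/* Security header length by key-id mode (implicit, index,
 * short-source index, hw-source index), as tabulated above. */
static const int sechdr_len[4]  = { 5, 6, 10, 14 };
/* Authentication tag length for (security level & 3) per
 * IEEE 802.15.4: none, MIC-32, MIC-64, MIC-128. */
static const int authtag_len[4] = { 0, 4, 8, 16 };

int main(void)
{
	int minlen = 11;	/* assumed example MHR length */
	int key_id_mode = 1;	/* 1-byte key index */
	int level = 5;		/* ENC-MIC-32 */
	int hlen = minlen + sechdr_len[key_id_mode] + authtag_len[level & 3];

	printf("max payload: %d\n", 127 - hlen - 2 /* FCS */);
	return 0;
}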

View File

@@ -68,4 +68,23 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_dump_keys(struct sk_buff *skb,
struct netlink_callback *cb);
int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_dump_devs(struct sk_buff *skb,
struct netlink_callback *cb);
int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
struct netlink_callback *cb);
int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info);
int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
struct netlink_callback *cb);
#endif

View File

@@ -124,6 +124,26 @@ static const struct genl_ops ieee8021154_ops[] = {
IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
ieee802154_dump_iface),
IEEE802154_OP(IEEE802154_SET_MACPARAMS, ieee802154_set_macparams),
IEEE802154_OP(IEEE802154_LLSEC_GETPARAMS, ieee802154_llsec_getparams),
IEEE802154_OP(IEEE802154_LLSEC_SETPARAMS, ieee802154_llsec_setparams),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_KEY, NULL,
ieee802154_llsec_dump_keys),
IEEE802154_OP(IEEE802154_LLSEC_ADD_KEY, ieee802154_llsec_add_key),
IEEE802154_OP(IEEE802154_LLSEC_DEL_KEY, ieee802154_llsec_del_key),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEV, NULL,
ieee802154_llsec_dump_devs),
IEEE802154_OP(IEEE802154_LLSEC_ADD_DEV, ieee802154_llsec_add_dev),
IEEE802154_OP(IEEE802154_LLSEC_DEL_DEV, ieee802154_llsec_del_dev),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEVKEY, NULL,
ieee802154_llsec_dump_devkeys),
IEEE802154_OP(IEEE802154_LLSEC_ADD_DEVKEY, ieee802154_llsec_add_devkey),
IEEE802154_OP(IEEE802154_LLSEC_DEL_DEVKEY, ieee802154_llsec_del_devkey),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_SECLEVEL, NULL,
ieee802154_llsec_dump_seclevels),
IEEE802154_OP(IEEE802154_LLSEC_ADD_SECLEVEL,
ieee802154_llsec_add_seclevel),
IEEE802154_OP(IEEE802154_LLSEC_DEL_SECLEVEL,
ieee802154_llsec_del_seclevel),
};
static const struct genl_multicast_group ieee802154_mcgrps[] = {

View File

@@ -715,3 +715,812 @@ out:
dev_put(dev);
return rc;
}
static int
ieee802154_llsec_parse_key_id(struct genl_info *info,
struct ieee802154_llsec_key_id *desc)
{
memset(desc, 0, sizeof(*desc));
if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE])
return -EINVAL;
desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
!(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
info->attrs[IEEE802154_ATTR_HW_ADDR]))
return -EINVAL;
desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
if (info->attrs[IEEE802154_ATTR_SHORT_ADDR]) {
desc->device_addr.mode = IEEE802154_ADDR_SHORT;
desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
} else {
desc->device_addr.mode = IEEE802154_ADDR_LONG;
desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
}
}
if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID])
return -EINVAL;
if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT])
return -EINVAL;
if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED])
return -EINVAL;
if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT)
desc->id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]);
switch (desc->mode) {
case IEEE802154_SCF_KEY_SHORT_INDEX:
{
u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
desc->short_source = cpu_to_le32(source);
break;
}
case IEEE802154_SCF_KEY_HW_INDEX:
desc->extended_source = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]);
break;
}
return 0;
}
static int
ieee802154_llsec_fill_key_id(struct sk_buff *msg,
const struct ieee802154_llsec_key_id *desc)
{
if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode))
return -EMSGSIZE;
if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID,
desc->device_addr.pan_id))
return -EMSGSIZE;
if (desc->device_addr.mode == IEEE802154_ADDR_SHORT &&
nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
desc->device_addr.short_addr))
return -EMSGSIZE;
if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
desc->device_addr.extended_addr))
return -EMSGSIZE;
}
if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id))
return -EMSGSIZE;
if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
le32_to_cpu(desc->short_source)))
return -EMSGSIZE;
if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
desc->extended_source))
return -EMSGSIZE;
return 0;
}
int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct net_device *dev = NULL;
int rc = -ENOBUFS;
struct ieee802154_mlme_ops *ops;
void *hdr;
struct ieee802154_llsec_params params;
pr_debug("%s\n", __func__);
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
ops = ieee802154_mlme_ops(dev);
if (!ops->llsec) {
rc = -EOPNOTSUPP;
goto out_dev;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
IEEE802154_LLSEC_GETPARAMS);
if (!hdr)
goto out_free;
rc = ops->llsec->get_params(dev, &params);
if (rc < 0)
goto out_free;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
be32_to_cpu(params.frame_counter)) ||
ieee802154_llsec_fill_key_id(msg, &params.out_key))
goto out_free;
dev_put(dev);
return ieee802154_nl_reply(msg, info);
out_free:
nlmsg_free(msg);
out_dev:
dev_put(dev);
return rc;
}
int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev = NULL;
int rc = -EINVAL;
struct ieee802154_mlme_ops *ops;
struct ieee802154_llsec_params params;
int changed = 0;
pr_debug("%s\n", __func__);
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] &&
!info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL])
goto out;
ops = ieee802154_mlme_ops(dev);
if (!ops->llsec) {
rc = -EOPNOTSUPP;
goto out;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] &&
nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7)
goto out;
if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) {
params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]);
changed |= IEEE802154_LLSEC_PARAM_ENABLED;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) {
if (ieee802154_llsec_parse_key_id(info, &params.out_key))
goto out;
changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) {
params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]);
changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) {
u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
params.frame_counter = cpu_to_be32(fc);
changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
}
rc = ops->llsec->set_params(dev, &params, changed);
dev_put(dev);
return rc;
out:
dev_put(dev);
return rc;
}
struct llsec_dump_data {
struct sk_buff *skb;
int s_idx, s_idx2;
int portid;
int nlmsg_seq;
struct net_device *dev;
struct ieee802154_mlme_ops *ops;
struct ieee802154_llsec_table *table;
};
static int
ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
int (*step)(struct llsec_dump_data*))
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
struct llsec_dump_data data;
int idx = 0;
int first_dev = cb->args[0];
int rc;
for_each_netdev(net, dev) {
if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
goto skip;
data.ops = ieee802154_mlme_ops(dev);
if (!data.ops->llsec)
goto skip;
data.skb = skb;
data.s_idx = cb->args[1];
data.s_idx2 = cb->args[2];
data.dev = dev;
data.portid = NETLINK_CB(cb->skb).portid;
data.nlmsg_seq = cb->nlh->nlmsg_seq;
data.ops->llsec->lock_table(dev);
data.ops->llsec->get_table(data.dev, &data.table);
rc = step(&data);
data.ops->llsec->unlock_table(dev);
if (rc < 0)
break;
skip:
idx++;
}
cb->args[0] = idx;
return skb->len;
}
static int
ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
int (*fn)(struct net_device*, struct genl_info*))
{
struct net_device *dev = NULL;
int rc = -EINVAL;
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!ieee802154_mlme_ops(dev)->llsec)
rc = -EOPNOTSUPP;
else
rc = fn(dev, info);
dev_put(dev);
return rc;
}
static int
ieee802154_llsec_parse_key(struct genl_info *info,
struct ieee802154_llsec_key *key)
{
u8 frames;
u32 commands[256 / 32];
memset(key, 0, sizeof(*key));
if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] ||
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES])
return -EINVAL;
frames = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES]);
if ((frames & BIT(IEEE802154_FC_TYPE_MAC_CMD)) &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS])
return -EINVAL;
if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) {
nla_memcpy(commands,
info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS],
256 / 8);
if (commands[0] || commands[1] || commands[2] || commands[3] ||
commands[4] || commands[5] || commands[6] ||
commands[7] >= BIT(IEEE802154_CMD_GTS_REQ + 1))
return -EINVAL;
key->cmd_frame_ids = commands[7];
}
key->frame_types = frames;
nla_memcpy(key->key, info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES],
IEEE802154_LLSEC_KEY_SIZE);
return 0;
}
static int llsec_add_key(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_key key;
struct ieee802154_llsec_key_id id;
if (ieee802154_llsec_parse_key(info, &key) ||
ieee802154_llsec_parse_key_id(info, &id))
return -EINVAL;
return ops->llsec->add_key(dev, &id, &key);
}
int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_key);
}
static int llsec_remove_key(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_key_id id;
if (ieee802154_llsec_parse_key_id(info, &id))
return -EINVAL;
return ops->llsec->del_key(dev, &id);
}
int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_remove_key);
}
static int
ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq,
const struct ieee802154_llsec_key_entry *key,
const struct net_device *dev)
{
void *hdr;
u32 commands[256 / 32];
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_KEY);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
ieee802154_llsec_fill_key_id(msg, &key->id) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
key->key->frame_types))
goto nla_put_failure;
if (key->key->frame_types & BIT(IEEE802154_FC_TYPE_MAC_CMD)) {
memset(commands, 0, sizeof(commands));
commands[7] = key->key->cmd_frame_ids;
if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
sizeof(commands), commands))
goto nla_put_failure;
}
if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES,
IEEE802154_LLSEC_KEY_SIZE, key->key->key))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_keys(struct llsec_dump_data *data)
{
struct ieee802154_llsec_key_entry *pos;
int rc = 0, idx = 0;
list_for_each_entry(pos, &data->table->keys, list) {
if (idx++ < data->s_idx)
continue;
if (ieee802154_nl_fill_key(data->skb, data->portid,
data->nlmsg_seq, pos, data->dev)) {
rc = -EMSGSIZE;
break;
}
data->s_idx++;
}
return rc;
}
int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
}
static int
llsec_parse_dev(struct genl_info *info,
struct ieee802154_llsec_device *dev)
{
memset(dev, 0, sizeof(*dev));
if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
!info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] ||
!info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] ||
(!!info->attrs[IEEE802154_ATTR_PAN_ID] !=
!!info->attrs[IEEE802154_ATTR_SHORT_ADDR]))
return -EINVAL;
if (info->attrs[IEEE802154_ATTR_PAN_ID]) {
dev->pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
dev->short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
} else {
dev->short_addr = cpu_to_le16(IEEE802154_ADDR_UNDEF);
}
dev->hwaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
dev->frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
dev->seclevel_exempt = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
dev->key_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE]);
if (dev->key_mode >= __IEEE802154_LLSEC_DEVKEY_MAX)
return -EINVAL;
return 0;
}
static int llsec_add_dev(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_device desc;
if (llsec_parse_dev(info, &desc))
return -EINVAL;
return ops->llsec->add_dev(dev, &desc);
}
int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_dev);
}
static int llsec_del_dev(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
__le64 devaddr;
if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
return -EINVAL;
devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
return ops->llsec->del_dev(dev, devaddr);
}
int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_del_dev);
}
static int
ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
const struct ieee802154_llsec_device *desc,
const struct net_device *dev)
{
void *hdr;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_DEV);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
desc->short_addr) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
desc->frame_counter) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
desc->seclevel_exempt) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_devs(struct llsec_dump_data *data)
{
struct ieee802154_llsec_device *pos;
int rc = 0, idx = 0;
list_for_each_entry(pos, &data->table->devices, list) {
if (idx++ < data->s_idx)
continue;
if (ieee802154_nl_fill_dev(data->skb, data->portid,
data->nlmsg_seq, pos, data->dev)) {
rc = -EMSGSIZE;
break;
}
data->s_idx++;
}
return rc;
}
int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
}
static int llsec_add_devkey(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_device_key key;
__le64 devaddr;
if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
ieee802154_llsec_parse_key_id(info, &key.key_id))
return -EINVAL;
devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
key.frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
return ops->llsec->add_devkey(dev, devaddr, &key);
}
int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey);
}
static int llsec_del_devkey(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_device_key key;
__le64 devaddr;
if (!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
ieee802154_llsec_parse_key_id(info, &key.key_id))
return -EINVAL;
devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
return ops->llsec->del_devkey(dev, devaddr, &key);
}
int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey);
}
static int
ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
__le64 devaddr,
const struct ieee802154_llsec_device_key *devkey,
const struct net_device *dev)
{
void *hdr;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_DEVKEY);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
devkey->frame_counter) ||
ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_devkeys(struct llsec_dump_data *data)
{
struct ieee802154_llsec_device *dpos;
struct ieee802154_llsec_device_key *kpos;
int rc = 0, idx = 0, idx2;
list_for_each_entry(dpos, &data->table->devices, list) {
if (idx++ < data->s_idx)
continue;
idx2 = 0;
list_for_each_entry(kpos, &dpos->keys, list) {
if (idx2++ < data->s_idx2)
continue;
if (ieee802154_nl_fill_devkey(data->skb, data->portid,
data->nlmsg_seq,
dpos->hwaddr, kpos,
data->dev)) {
return rc = -EMSGSIZE;
}
data->s_idx2++;
}
data->s_idx++;
}
return rc;
}
int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
}
static int
llsec_parse_seclevel(struct genl_info *info,
struct ieee802154_llsec_seclevel *sl)
{
memset(sl, 0, sizeof(*sl));
if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE] ||
!info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS] ||
!info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE])
return -EINVAL;
sl->frame_type = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE]);
if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD) {
if (!info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID])
return -EINVAL;
sl->cmd_frame_id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]);
}
sl->sec_levels = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS]);
sl->device_override = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
return 0;
}
static int llsec_add_seclevel(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_seclevel sl;
if (llsec_parse_seclevel(info, &sl))
return -EINVAL;
return ops->llsec->add_seclevel(dev, &sl);
}
int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel);
}
static int llsec_del_seclevel(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_seclevel sl;
if (llsec_parse_seclevel(info, &sl))
return -EINVAL;
return ops->llsec->del_seclevel(dev, &sl);
}
int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel);
}
static int
ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq,
const struct ieee802154_llsec_seclevel *sl,
const struct net_device *dev)
{
void *hdr;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_SECLEVEL);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
sl->device_override))
goto nla_put_failure;
if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
sl->cmd_frame_id))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_seclevels(struct llsec_dump_data *data)
{
struct ieee802154_llsec_seclevel *pos;
int rc = 0, idx = 0;
list_for_each_entry(pos, &data->table->security_levels, list) {
if (idx++ < data->s_idx)
continue;
if (ieee802154_nl_fill_seclevel(data->skb, data->portid,
data->nlmsg_seq, pos,
data->dev)) {
rc = -EMSGSIZE;
break;
}
data->s_idx++;
}
return rc;
}
int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
}
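All of these dump callbacks use the standard netlink dump-resume idiom: when the reply skb fills, the callback bails out with -EMSGSIZE and the kernel re-invokes it with cb->args[] preserved, so s_idx (and s_idx2 for the nested device-key walk) records how many entries earlier passes already emitted. A standalone model of that control flow:

#include <stdio.h>

struct dump_state { int s_idx; };

/* Models llsec_iter_keys() above: emit entries until the message
 * buffer "fills", recording progress in s_idx so the next
 * invocation skips what was already sent. */
static int dump(struct dump_state *st, int nitems)
{
	int idx = 0, emitted = 0, item;

	for (item = 0; item < nitems; item++) {
		if (idx++ < st->s_idx)
			continue;		/* sent in an earlier pass */
		if (emitted == 3)		/* pretend the skb is full */
			return -1;		/* -EMSGSIZE in the kernel */
		printf("emit entry %d\n", item);
		emitted++;
		st->s_idx++;
	}
	return 0;
}

int main(void)
{
	struct dump_state st = { 0 };

	while (dump(&st, 6) < 0)
		printf("buffer full, resuming at index %d\n", st.s_idx);
	return 0;
}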

View File

@@ -62,5 +62,21 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
[IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
[IEEE802154_ATTR_LLSEC_ENABLED] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_SECLEVEL] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_KEY_MODE] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT] = { .type = NLA_U32, },
[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_LLSEC_KEY_ID] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] = { .type = NLA_U32 },
[IEEE802154_ATTR_LLSEC_KEY_BYTES] = { .len = 16, },
[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS] = { .len = 258 / 8 },
[IEEE802154_ATTR_LLSEC_FRAME_TYPE] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_SECLEVELS] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, },
};

View File

@@ -36,7 +36,7 @@ struct lowpan_frag_info {
u8 d_offset;
};
struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
{
return (struct lowpan_frag_info *)skb->cb;
}
@@ -120,6 +120,8 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
struct inet_frag_queue *q;
struct lowpan_create_arg arg;
unsigned int hash;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
arg.tag = frag_info->d_tag;
arg.d_size = frag_info->d_size;
@@ -129,7 +131,7 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
read_lock(&lowpan_frags.lock);
hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
q = inet_frag_find(&net->ieee802154_lowpan.frags,
q = inet_frag_find(&ieee802154_lowpan->frags,
&lowpan_frags, &arg, hash);
if (IS_ERR_OR_NULL(q)) {
inet_frag_maybe_warn_overflow(q, pr_fmt());
@@ -357,6 +359,8 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
struct net *net = dev_net(skb->dev);
struct lowpan_frag_info *frag_info = lowpan_cb(skb);
struct ieee802154_addr source, dest;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
int err;
source = mac_cb(skb)->source;
@@ -366,10 +370,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
if (err < 0)
goto err;
if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
if (frag_info->d_size > ieee802154_lowpan->max_dsize)
goto err;
inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);
inet_frag_evictor(&ieee802154_lowpan->frags, &lowpan_frags, false);
fq = fq_find(net, frag_info, &source, &dest);
if (fq != NULL) {
@@ -436,6 +440,8 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
table = lowpan_frags_ns_ctl_table;
if (!net_eq(net, &init_net)) {
@@ -444,10 +450,10 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
if (table == NULL)
goto err_alloc;
table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
table[2].data = &net->ieee802154_lowpan.frags.timeout;
table[3].data = &net->ieee802154_lowpan.max_dsize;
table[0].data = &ieee802154_lowpan->frags.high_thresh;
table[1].data = &ieee802154_lowpan->frags.low_thresh;
table[2].data = &ieee802154_lowpan->frags.timeout;
table[3].data = &ieee802154_lowpan->max_dsize;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
@@ -458,7 +464,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
if (hdr == NULL)
goto err_reg;
net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
ieee802154_lowpan->sysctl.frags_hdr = hdr;
return 0;
err_reg:
@@ -471,9 +477,11 @@ err_alloc:
static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
struct ctl_table *table;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
@@ -514,20 +522,26 @@ static inline void lowpan_frags_sysctl_unregister(void)
static int __net_init lowpan_frags_init_net(struct net *net)
{
net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
net->ieee802154_lowpan.max_dsize = 0xFFFF;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
inet_frags_init_net(&net->ieee802154_lowpan.frags);
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
ieee802154_lowpan->max_dsize = 0xFFFF;
inet_frags_init_net(&ieee802154_lowpan->frags);
return lowpan_frags_ns_sysctl_register(net);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
lowpan_frags_ns_sysctl_unregister(net);
inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}
static struct pernet_operations lowpan_frags_ops = {

View File

@@ -254,7 +254,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
struct inet_sock *inet;
struct proto *answer_prot;
unsigned char answer_flags;
char answer_no_check;
int try_loading_module = 0;
int err;
@@ -312,7 +311,6 @@ lookup_protocol:
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_no_check = answer->no_check;
answer_flags = answer->flags;
rcu_read_unlock();
@@ -324,7 +322,6 @@ lookup_protocol:
goto out;
err = 0;
sk->sk_no_check = answer_no_check;
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
@@ -1002,7 +999,6 @@ static struct inet_protosw inetsw_array[] =
.protocol = IPPROTO_TCP,
.prot = &tcp_prot,
.ops = &inet_stream_ops,
.no_check = 0,
.flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK,
},
@@ -1012,7 +1008,6 @@ static struct inet_protosw inetsw_array[] =
.protocol = IPPROTO_UDP,
.prot = &udp_prot,
.ops = &inet_dgram_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
},
@@ -1021,7 +1016,6 @@ static struct inet_protosw inetsw_array[] =
.protocol = IPPROTO_ICMP,
.prot = &ping_prot,
.ops = &inet_dgram_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
},
@@ -1030,7 +1024,6 @@ static struct inet_protosw inetsw_array[] =
.protocol = IPPROTO_IP, /* wild card */
.prot = &raw_prot,
.ops = &inet_sockraw_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
}
};
@@ -1261,10 +1254,12 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP |
SKB_GSO_SIT |
SKB_GSO_TCPV6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM |
SKB_GSO_MPLS |
0)))
goto out;
@@ -1476,22 +1471,20 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
unsigned long snmp_fold_field(void __percpu *mib[], int offt)
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
unsigned long res = 0;
int i, j;
int i;
for_each_possible_cpu(i) {
for (j = 0; j < SNMP_ARRAY_SZ; j++)
res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
}
for_each_possible_cpu(i)
res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
#if BITS_PER_LONG==32
u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
u64 res = 0;
int cpu;
@@ -1502,7 +1495,7 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
u64 v;
unsigned int start;
bhptr = per_cpu_ptr(mib[0], cpu);
bhptr = per_cpu_ptr(mib, cpu);
syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
do {
start = u64_stats_fetch_begin_irq(syncp);
@@ -1516,25 +1509,6 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif
int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
{
BUG_ON(ptr == NULL);
ptr[0] = __alloc_percpu(mibsize, align);
if (!ptr[0])
return -ENOMEM;
#if SNMP_ARRAY_SZ == 2
ptr[1] = __alloc_percpu(mibsize, align);
if (!ptr[1]) {
free_percpu(ptr[0]);
ptr[0] = NULL;
return -ENOMEM;
}
#endif
return 0;
}
EXPORT_SYMBOL_GPL(snmp_mib_init);
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
@@ -1570,40 +1544,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
{
int i;
if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
sizeof(struct tcp_mib),
__alignof__(struct tcp_mib)) < 0)
net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
if (!net->mib.tcp_statistics)
goto err_tcp_mib;
if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
sizeof(struct ipstats_mib),
__alignof__(struct ipstats_mib)) < 0)
net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
if (!net->mib.ip_statistics)
goto err_ip_mib;
for_each_possible_cpu(i) {
struct ipstats_mib *af_inet_stats;
af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
u64_stats_init(&af_inet_stats->syncp);
#if SNMP_ARRAY_SZ == 2
af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
u64_stats_init(&af_inet_stats->syncp);
#endif
}
if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
sizeof(struct linux_mib),
__alignof__(struct linux_mib)) < 0)
net->mib.net_statistics = alloc_percpu(struct linux_mib);
if (!net->mib.net_statistics)
goto err_net_mib;
if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
sizeof(struct udp_mib),
__alignof__(struct udp_mib)) < 0)
net->mib.udp_statistics = alloc_percpu(struct udp_mib);
if (!net->mib.udp_statistics)
goto err_udp_mib;
if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
sizeof(struct udp_mib),
__alignof__(struct udp_mib)) < 0)
net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
if (!net->mib.udplite_statistics)
goto err_udplite_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
sizeof(struct icmp_mib),
__alignof__(struct icmp_mib)) < 0)
net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
if (!net->mib.icmp_statistics)
goto err_icmp_mib;
net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
GFP_KERNEL);
@@ -1614,17 +1578,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
return 0;
err_icmpmsg_mib:
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
free_percpu(net->mib.icmp_statistics);
err_icmp_mib:
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
free_percpu(net->mib.udplite_statistics);
err_udplite_mib:
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
free_percpu(net->mib.udp_statistics);
err_udp_mib:
snmp_mib_free((void __percpu **)net->mib.net_statistics);
free_percpu(net->mib.net_statistics);
err_net_mib:
snmp_mib_free((void __percpu **)net->mib.ip_statistics);
free_percpu(net->mib.ip_statistics);
err_ip_mib:
snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
free_percpu(net->mib.tcp_statistics);
err_tcp_mib:
return -ENOMEM;
}
@@ -1632,12 +1596,12 @@ err_tcp_mib:
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
kfree(net->mib.icmpmsg_statistics);
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
snmp_mib_free((void __percpu **)net->mib.net_statistics);
snmp_mib_free((void __percpu **)net->mib.ip_statistics);
snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
free_percpu(net->mib.icmp_statistics);
free_percpu(net->mib.udplite_statistics);
free_percpu(net->mib.udp_statistics);
free_percpu(net->mib.net_statistics);
free_percpu(net->mib.ip_statistics);
free_percpu(net->mib.tcp_statistics);
}
static __net_initdata struct pernet_operations ipv4_mib_ops = {
@@ -1736,13 +1700,9 @@ static int __init inet_init(void)
BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
if (!sysctl_local_reserved_ports)
goto out;
rc = proto_register(&tcp_prot, 1);
if (rc)
goto out_free_reserved_ports;
goto out;
rc = proto_register(&udp_prot, 1);
if (rc)
@@ -1852,8 +1812,6 @@ out_unregister_udp_proto:
proto_unregister(&udp_prot);
out_unregister_tcp_proto:
proto_unregister(&tcp_prot);
out_free_reserved_ports:
kfree(sysctl_local_reserved_ports);
goto out;
}
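With the two-element per-cpu MIB arrays collapsed into a single per-cpu allocation, folding a counter becomes one pass over the CPUs, as the reworked snmp_fold_field() above shows. A userspace model of the folded read, with a plain array standing in for per-cpu storage:

#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

struct mib { unsigned long in_pkts; unsigned long out_pkts; };

/* Stand-in for per-cpu MIB storage; each CPU bumps its own copy
 * without atomics, and readers sum across all copies. */
static struct mib percpu_mib[NR_CPUS];

/* Mirrors the reworked snmp_fold_field(): one loop, one pointer
 * per CPU, counter selected by byte offset. */
static unsigned long fold_field(size_t offt)
{
	unsigned long res = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		res += *(unsigned long *)((char *)&percpu_mib[cpu] + offt);
	return res;
}

int main(void)
{
	percpu_mib[0].in_pkts = 3;
	percpu_mib[2].in_pkts = 7;
	printf("in_pkts = %lu\n", fold_field(offsetof(struct mib, in_pkts)));
	return 0;
}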

View File

@@ -86,18 +86,26 @@ out:
}
EXPORT_SYMBOL(ip4_datagram_connect);
/* Because UDP xmit path can manipulate sk_dst_cache without holding
* socket lock, we need to use sk_dst_set() here,
* even if we own the socket lock.
*/
void ip4_datagram_release_cb(struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
struct dst_entry *dst;
struct flowi4 fl4;
struct rtable *rt;
if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
return;
rcu_read_lock();
dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
rcu_read_unlock();
return;
}
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
@@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
if (!IS_ERR(rt))
__sk_dst_set(sk, &rt->dst);
dst = !IS_ERR(rt) ? &rt->dst : NULL;
sk_dst_set(sk, dst);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);

View File

@@ -106,7 +106,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
static DEFINE_SPINLOCK(inet_addr_hash_lock);
static u32 inet_addr_hash(struct net *net, __be32 addr)
{
@@ -119,16 +118,14 @@ static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
{
u32 hash = inet_addr_hash(net, ifa->ifa_local);
spin_lock(&inet_addr_hash_lock);
ASSERT_RTNL();
hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
spin_unlock(&inet_addr_hash_lock);
}
static void inet_hash_remove(struct in_ifaddr *ifa)
{
spin_lock(&inet_addr_hash_lock);
ASSERT_RTNL();
hlist_del_init_rcu(&ifa->hash);
spin_unlock(&inet_addr_hash_lock);
}
/**
@@ -830,7 +827,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
ifa_existing = find_matching_ifa(ifa);
if (!ifa_existing) {
/* It would be best to check for !NLM_F_CREATE here but
* userspace alreay relies on not having to provide this.
* userspace already relies on not having to provide this.
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);

View File

@@ -84,7 +84,8 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
ptr--;
}
if (tpi->flags&TUNNEL_CSUM &&
!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) {
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
*ptr = 0;
*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
skb->len, 0));
@@ -93,28 +94,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
}
EXPORT_SYMBOL_GPL(gre_build_header);
static __sum16 check_checksum(struct sk_buff *skb)
{
__sum16 csum = 0;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
csum = csum_fold(skb->csum);
if (!csum)
break;
/* Fall through. */
case CHECKSUM_NONE:
skb->csum = 0;
csum = __skb_checksum_complete(skb);
skb->ip_summed = CHECKSUM_COMPLETE;
break;
}
return csum;
}
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err)
{
@@ -141,7 +120,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
options = (__be32 *)(greh + 1);
if (greh->flags & GRE_CSUM) {
if (check_checksum(skb)) {
if (skb_checksum_simple_validate(skb)) {
*csum_err = true;
return -EINVAL;
}
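The open-coded CHECKSUM_COMPLETE/CHECKSUM_NONE switch removed here (and from icmp_rcv() and igmp_rcv() below) becomes skb_checksum_simple_validate(); the invariant behind both forms is that a packet carrying a valid ones'-complement checksum sums to 0xffff, so the fold is zero. A self-contained sketch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement fold, as csum_fold() does: a packet whose
 * embedded checksum is valid folds to 0. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint32_t csum_partial(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return sum;
}

int main(void)
{
	/* 0xfefd is the valid checksum of 0x0102. */
	uint8_t pkt[] = { 0x01, 0x02, 0xfe, 0xfd };

	printf("fold = 0x%04x (0 means valid)\n",
	       csum_fold(csum_partial(pkt, sizeof(pkt))));
	return 0;
}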

View File

@@ -42,6 +42,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPIP)))
goto out;
@@ -55,6 +56,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
goto out;
csum = !!(greh->flags & GRE_CSUM);
if (csum)
skb->encap_hdr_csum = 1;
if (unlikely(!pskb_may_pull(skb, ghl)))
goto out;
@@ -94,10 +97,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
}
}
greh = (struct gre_base_hdr *)(skb->data);
skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)
skb_transport_header(skb);
pcsum = (__be32 *)(greh + 1);
*pcsum = 0;
*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
}
__skb_push(skb, tnl_hlen - ghl);
@@ -125,10 +131,12 @@ static __sum16 gro_skb_checksum(struct sk_buff *skb)
csum_partial(skb->data, skb_gro_offset(skb), 0));
sum = csum_fold(NAPI_GRO_CB(skb)->csum);
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
if (unlikely(!sum))
if (unlikely(!sum) && !skb->csum_complete_sw)
netdev_rx_csum_fault(skb->dev);
} else
} else {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum_complete_sw = 1;
}
return sum;
}

View File

@@ -337,6 +337,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
struct sock *sk;
struct inet_sock *inet;
__be32 daddr, saddr;
u32 mark = IP4_REPLY_MARK(net, skb->mark);
if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
return;
@@ -349,6 +350,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
icmp_param->data.icmph.checksum = 0;
inet->tos = ip_hdr(skb)->tos;
sk->sk_mark = mark;
daddr = ipc.addr = ip_hdr(skb)->saddr;
saddr = fib_compute_spec_dst(skb);
ipc.opt = NULL;
@@ -364,6 +366,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_mark = mark;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -382,7 +385,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
struct flowi4 *fl4,
struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos,
__be32 saddr, u8 tos, u32 mark,
int type, int code,
struct icmp_bxm *param)
{
@@ -394,6 +397,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->daddr = (param->replyopts.opt.opt.srr ?
param->replyopts.opt.opt.faddr : iph->saddr);
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
fl4->flowi4_tos = RT_TOS(tos);
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
@@ -491,6 +495,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
struct flowi4 fl4;
__be32 saddr;
u8 tos;
u32 mark;
struct net *net;
struct sock *sk;
@@ -592,6 +597,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
IPTOS_PREC_INTERNETCONTROL) :
iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
goto out_unlock;
@@ -608,13 +614,14 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
icmp_param->skb = skb_in;
icmp_param->offset = skb_network_offset(skb_in);
inet_sk(sk)->tos = tos;
sk->sk_mark = mark;
ipc.addr = iph->saddr;
ipc.opt = &icmp_param->replyopts.opt;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
type, code, icmp_param);
if (IS_ERR(rt))
goto out_unlock;
@@ -908,16 +915,8 @@ int icmp_rcv(struct sk_buff *skb)
ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_fold(skb->csum))
break;
/* fall through */
case CHECKSUM_NONE:
skb->csum = 0;
if (__skb_checksum_complete(skb))
goto csum_error;
}
if (skb_checksum_simple_validate(skb))
goto csum_error;
if (!pskb_pull(skb, sizeof(*icmph)))
goto error;
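This is the fwmark-reflect plumbing from item 11 of the pull: ICMP replies and errors inherit the inbound skb->mark only when the new per-netns sysctl permits it. A userspace model of the policy (the sysctl field name follows the fwmark-reflect series; treat the exact macro body as an assumption, not a copy of the header):

#include <stdio.h>

struct netns { int sysctl_fwmark_reflect; };

/* Mirrors IP4_REPLY_MARK's behavior: replies keep mark 0 unless
 * the netns opted in to reflecting the inbound mark. */
static unsigned int ip4_reply_mark(const struct netns *net, unsigned int mark)
{
	return net->sysctl_fwmark_reflect ? mark : 0;
}

int main(void)
{
	struct netns off = { 0 }, on = { 1 };

	printf("off: 0x%x, on: 0x%x\n",
	       ip4_reply_mark(&off, 0x2a), ip4_reply_mark(&on, 0x2a));
	return 0;
}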

View File

@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
pip->saddr = fl4.saddr;
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
ip_select_ident(skb, &rt->dst, NULL);
ip_select_ident(skb, NULL);
((u8 *)&pip[1])[0] = IPOPT_RA;
((u8 *)&pip[1])[1] = 4;
((u8 *)&pip[1])[2] = 0;
@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->protocol = IPPROTO_IGMP;
ip_select_ident(skb, &rt->dst, NULL);
ip_select_ident(skb, NULL);
((u8 *)&iph[1])[0] = IPOPT_RA;
((u8 *)&iph[1])[1] = 4;
((u8 *)&iph[1])[2] = 0;
@@ -988,16 +988,8 @@ int igmp_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
goto drop;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_fold(skb->csum))
break;
/* fall through */
case CHECKSUM_NONE:
skb->csum = 0;
if (__skb_checksum_complete(skb))
goto drop;
}
if (skb_checksum_simple_validate(skb))
goto drop;
ih = igmp_hdr(skb);
switch (ih->type) {

View File

@@ -29,9 +29,6 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
unsigned int seq;
@@ -113,7 +110,7 @@ again:
smallest_size = -1;
do {
if (inet_is_reserved_local_port(rover))
if (inet_is_local_reserved_port(net, rover))
goto next_nolock;
head = &hashinfo->bhash[inet_bhashfn(net, rover,
hashinfo->bhash_size)];
@@ -408,7 +405,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
struct net *net = sock_net(sk);
int flags = inet_sk_flowi_flags(sk);
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol,
flags,
@@ -445,7 +442,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
rcu_read_lock();
opt = rcu_dereference(newinet->inet_opt);
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
@@ -680,6 +677,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
newsk->sk_write_space = sk_stream_write_space;
newsk->sk_mark = inet_rsk(req)->ir_mark;
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newicsk->icsk_probes_out = 0;
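inet_is_local_reserved_port() now consults a per-netns bitmap rather than the old global sysctl_local_reserved_ports, whose allocation was dropped from inet_init() earlier in this diff. A minimal model of the bitmap test:

#include <limits.h>
#include <stdio.h>

#define NPORTS		65536
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

/* One bit per port, as the per-netns reserved-ports bitmap keeps. */
static unsigned long reserved_ports[NPORTS / BITS_PER_LONG];

static void reserve_port(unsigned int port)
{
	reserved_ports[port / BITS_PER_LONG] |= 1UL << (port % BITS_PER_LONG);
}

static int is_reserved(unsigned int port)
{
	return !!(reserved_ports[port / BITS_PER_LONG] &
		  (1UL << (port % BITS_PER_LONG)));
}

int main(void)
{
	reserve_port(8080);
	/* Port allocators skip reserved ports, as in __inet_hash_connect(). */
	printf("8080 reserved: %d\n", is_reserved(8080));
	printf("8081 reserved: %d\n", is_reserved(8081));
	return 0;
}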

View File

@@ -274,7 +274,7 @@ struct sock *__inet_lookup_established(struct net *net,
const __be32 daddr, const u16 hnum,
const int dif)
{
INET_ADDR_COOKIE(acookie, saddr, daddr)
INET_ADDR_COOKIE(acookie, saddr, daddr);
const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
struct sock *sk;
const struct hlist_nulls_node *node;
@@ -327,7 +327,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
__be32 daddr = inet->inet_rcv_saddr;
__be32 saddr = inet->inet_daddr;
int dif = sk->sk_bound_dev_if;
INET_ADDR_COOKIE(acookie, saddr, daddr)
INET_ADDR_COOKIE(acookie, saddr, daddr);
const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
struct net *net = sock_net(sk);
unsigned int hash = inet_ehashfn(net, daddr, lport,
@@ -500,7 +500,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
local_bh_disable();
for (i = 1; i <= remaining; i++) {
port = low + (i + offset) % remaining;
if (inet_is_reserved_local_port(port))
if (inet_is_local_reserved_port(net, port))
continue;
head = &hinfo->bhash[inet_bhashfn(net, port,
hinfo->bhash_size)];

View File

@@ -26,20 +26,7 @@
* Theory of operations.
* We keep one entry for each peer IP address. The nodes contain long-living
* information about the peer which doesn't depend on routes.
* At this moment this information consists only of ID field for the next
* outgoing IP packet. This field is incremented with each packet as encoded
* in inet_getid() function (include/net/inetpeer.h).
* At the moment of writing this notes identifier of IP packets is generated
* to be unpredictable using this code only for packets subjected
* (actually or potentially) to defragmentation. I.e. DF packets less than
* PMTU in size when local fragmentation is disabled use a constant ID and do
* not use this code (see ip_select_ident() in include/net/ip.h).
*
* Route cache entries hold references to our nodes.
* New cache entries get references via lookup by destination IP address in
* the avl tree. The reference is grabbed only when it's needed i.e. only
* when we try to output IP packet which needs an unpredictable ID (see
* __ip_select_ident() in net/ipv4/route.c).
* Nodes are removed only when reference counter goes to 0.
* Once that has happened, the node may be removed after a sufficient amount
* of time has passed since its last use. The less-recently-used entry can
@@ -62,7 +49,6 @@
* refcnt: atomically against modifications on other CPU;
* usually under some other lock to prevent node disappearing
* daddr: unchangeable
* ip_id_count: atomic value (no lock needed)
*/
static struct kmem_cache *peer_cachep __read_mostly;
@@ -120,7 +106,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
static void inetpeer_gc_worker(struct work_struct *work)
{
struct inet_peer *p, *n, *c;
LIST_HEAD(list);
struct list_head list;
spin_lock_bh(&gc_lock);
list_replace_init(&gc_list, &list);
@@ -497,10 +483,6 @@ relookup:
p->daddr = *daddr;
atomic_set(&p->refcnt, 1);
atomic_set(&p->rid, 0);
atomic_set(&p->ip_id_count,
(daddr->family == AF_INET) ?
secure_ip_id(daddr->addr.a4) :
secure_ipv6_id(daddr->addr.a6));
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first

View File

@@ -42,7 +42,7 @@
static bool ip_may_fragment(const struct sk_buff *skb)
{
return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
skb->local_df;
skb->ignore_df;
}
static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)

View File

@@ -410,7 +410,7 @@ static int ipgre_open(struct net_device *dev)
struct flowi4 fl4;
struct rtable *rt;
rt = ip_route_output_gre(dev_net(dev), &fl4,
rt = ip_route_output_gre(t->net, &fl4,
t->parms.iph.daddr,
t->parms.iph.saddr,
t->parms.o_key,
@@ -434,7 +434,7 @@ static int ipgre_close(struct net_device *dev)
if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
struct in_device *in_dev;
in_dev = inetdev_by_index(dev_net(dev), t->mlink);
in_dev = inetdev_by_index(t->net, t->mlink);
if (in_dev)
ip_mc_dec_group(in_dev, t->parms.iph.daddr);
}
@@ -478,7 +478,7 @@ static void __gre_tunnel_init(struct net_device *dev)
dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
dev->features |= NETIF_F_NETNS_LOCAL | GRE_FEATURES;
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
@@ -649,6 +649,7 @@ static void ipgre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &gre_tap_netdev_ops;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip_tunnel_setup(dev, gre_tap_net_id);
}

View File

@@ -364,7 +364,7 @@ int ip_options_compile(struct net *net,
}
if (optptr[2] <= optlen) {
unsigned char *timeptr = NULL;
if (optptr[2]+3 > optptr[1]) {
if (optptr[2]+3 > optlen) {
pp_ptr = optptr + 2;
goto error;
}
@@ -376,7 +376,7 @@ int ip_options_compile(struct net *net,
optptr[2] += 4;
break;
case IPOPT_TS_TSANDADDR:
if (optptr[2]+7 > optptr[1]) {
if (optptr[2]+7 > optlen) {
pp_ptr = optptr + 2;
goto error;
}
@@ -390,7 +390,7 @@ int ip_options_compile(struct net *net,
optptr[2] += 8;
break;
case IPOPT_TS_PRESPEC:
if (optptr[2]+7 > optptr[1]) {
if (optptr[2]+7 > optlen) {
pp_ptr = optptr + 2;
goto error;
}
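All three hunks tighten the timestamp-option checks to bound the slot pointer by optlen. A generic sketch of the idiom, validating an option's internal pointer against the bytes actually held rather than any self-declared length (the layout here is illustrative, not the full IPv4 option grammar):

#include <stdio.h>

/* Sketch: optptr[1] is the option's self-declared length and
 * optptr[2] its 1-based "pointer" into the option; the slot at
 * the pointer must fit inside the bytes we actually hold. */
static int ts_slot_ok(const unsigned char *optptr, int optlen, int slot_len)
{
	if (optlen < 4 || optptr[2] < 5)
		return 0;
	return optptr[2] - 1 + slot_len <= optlen;
}

int main(void)
{
	unsigned char opt[8] = { 68, 8, 5, 0 };	/* type, len, ptr, oflw/flg */

	printf("4-byte slot fits: %d\n", ts_slot_ok(opt, sizeof(opt), 4));
	printf("8-byte slot fits: %d\n", ts_slot_ok(opt, sizeof(opt), 8));
	return 0;
}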

View File

@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
iph->protocol = sk->sk_protocol;
ip_select_ident(skb, &rt->dst, sk);
ip_select_ident(skb, sk);
if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2;
@@ -415,7 +415,7 @@ packet_routed:
skb_reset_network_header(skb);
iph = ip_hdr(skb);
*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
@@ -430,8 +430,7 @@ packet_routed:
ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
}
ip_select_ident_more(skb, &rt->dst, sk,
(skb_shinfo(skb)->gso_segs ?: 1) - 1);
ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
/* TODO : should we use skb->sk here instead of sk ? */
skb->priority = sk->sk_priority;
@@ -501,7 +500,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
iph = ip_hdr(skb);
mtu = ip_skb_dst_mtu(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
(IPCB(skb)->frag_max_size &&
IPCB(skb)->frag_max_size > mtu))) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -866,7 +865,7 @@ static int __ip_append_data(struct sock *sk,
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1189,7 +1188,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
if (cork->length + size > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1350,10 +1349,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
* to fragment the frame generated here. No matter, what transforms
* how transforms change size of the packet, it will come out.
*/
skb->local_df = ip_sk_local_df(sk);
skb->ignore_df = ip_sk_ignore_df(sk);
/* DF bit is set when we want to see DF on outgoing frames.
* If local_df is set too, we still allow to fragment this frame
* If ignore_df is set too, we still allow to fragment this frame
* locally. */
if (inet->pmtudisc == IP_PMTUDISC_DO ||
inet->pmtudisc == IP_PMTUDISC_PROBE ||
@@ -1379,7 +1378,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
ip_select_ident(skb, &rt->dst, sk);
ip_select_ident(skb, sk);
if (opt) {
iph->ihl += opt->optlen>>2;
@@ -1546,7 +1545,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
daddr = replyopts.opt.opt.faddr;
}
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
flowi4_init_output(&fl4, arg->bound_dev_if,
IP4_REPLY_MARK(net, skb->mark),
RT_TOS(arg->tos),
RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
ip_reply_arg_flowi_flags(arg),
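
The ip_select_ident_more() call earlier in this file's diff becomes ip_select_ident_segs(), which reserves one IP ID per GSO segment instead of one per skb. Reconstructed from this series (the header change itself is not shown here), the reworked helpers in include/net/ip.h read roughly:

static inline void ip_select_ident_segs(struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* Atomic DF packets: bump the per-socket counter by the
		 * segment count so every segment gets a distinct ID.
		 */
		iph->id = (sk && inet_sk(sk)->inet_daddr) ?
				htons(inet_sk(sk)->inet_id += segs) : 0;
	} else {
		__ip_select_ident(iph, segs);
	}
}

static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk)
{
	ip_select_ident_segs(skb, sk, 1);
}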


@@ -268,6 +268,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
__be16 flags = parms->i_flags;
int link = parms->link;
struct ip_tunnel *t = NULL;
struct hlist_head *head = ip_bucket(itn, parms);
@@ -275,9 +276,9 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
key == t->parms.i_key &&
link == t->parms.link &&
type == t->dev->type)
type == t->dev->type &&
ip_tunnel_key_match(&t->parms, flags, key))
break;
}
return t;
@@ -395,11 +396,10 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
struct ip_tunnel_net *itn,
struct ip_tunnel_parm *parms)
{
struct ip_tunnel *nt, *fbt;
struct ip_tunnel *nt;
struct net_device *dev;
BUG_ON(!itn->fb_tunnel_dev);
fbt = netdev_priv(itn->fb_tunnel_dev);
dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
if (IS_ERR(dev))
return ERR_CAST(dev);
@@ -668,6 +668,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
dev->needed_headroom = max_headroom;
if (skb_cow_head(skb, dev->needed_headroom)) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
kfree_skb(skb);
return;
@@ -747,19 +748,19 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
goto done;
if (p->iph.ttl)
p->iph.frag_off |= htons(IP_DF);
if (!(p->i_flags&TUNNEL_KEY))
p->i_key = 0;
if (!(p->o_flags&TUNNEL_KEY))
p->o_key = 0;
if (!(p->i_flags & VTI_ISVTI)) {
if (!(p->i_flags & TUNNEL_KEY))
p->i_key = 0;
if (!(p->o_flags & TUNNEL_KEY))
p->o_key = 0;
}
t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
if (!t && (cmd == SIOCADDTUNNEL)) {
t = ip_tunnel_create(net, itn, p);
if (IS_ERR(t)) {
err = PTR_ERR(t);
break;
}
err = PTR_ERR_OR_ZERO(t);
break;
}
if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
if (t != NULL) {
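
The SIOCADDTUNNEL branch above is simplified with PTR_ERR_OR_ZERO(), the include/linux/err.h helper that collapses the open-coded IS_ERR()/PTR_ERR() dance:

/* From include/linux/err.h */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}

Since ip_tunnel_create() returns either a valid tunnel or an ERR_PTR(), err ends up 0 on success and the errno otherwise, with no separate error branch.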


@@ -74,7 +74,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
iph->daddr = dst;
iph->saddr = src;
iph->ttl = ttl;
__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
err = ip_local_out_sk(sk, skb);
if (unlikely(net_xmit_eval(err)))
@@ -135,6 +135,14 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
return skb;
}
/* If packet is not gso and we are resolving any partial checksum,
* clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
* on the outer header without confusing devices that implement
* NETIF_F_IP_CSUM with encapsulation.
*/
if (csum_help)
skb->encapsulation = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
err = skb_checksum_help(skb);
if (unlikely(err))


@@ -313,7 +313,13 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
}
p.i_flags |= VTI_ISVTI;
if (!(p.i_flags & GRE_KEY))
p.i_key = 0;
if (!(p.o_flags & GRE_KEY))
p.o_key = 0;
p.i_flags = VTI_ISVTI;
err = ip_tunnel_ioctl(dev, &p, cmd);
if (err)
return err;


@@ -149,13 +149,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
t->dev->ifindex, 0, IPPROTO_IPIP, 0);
t->parms.link, 0, IPPROTO_IPIP, 0);
err = 0;
goto out;
}
if (type == ICMP_REDIRECT) {
ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
IPPROTO_IPIP, 0);
err = 0;
goto out;
@@ -486,4 +486,5 @@ static void __exit ipip_fini(void)
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");


@@ -484,7 +484,7 @@ static void reg_vif_setup(struct net_device *dev)
dev->type = ARPHRD_PIMREG;
dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
dev->flags = IFF_NOARP;
dev->netdev_ops = &reg_vif_netdev_ops,
dev->netdev_ops = &reg_vif_netdev_ops;
dev->destructor = free_netdev;
dev->features |= NETIF_F_NETNS_LOCAL;
}
@@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
iph->tot_len = htons(skb->len);
ip_select_ident(skb, skb_dst(skb), NULL);
ip_select_ident(skb, NULL);
ip_send_check(iph);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));


@@ -91,17 +91,9 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
if (nf_ct_is_untracked(ct))
return NF_ACCEPT;
nat = nfct_nat(ct);
if (!nat) {
/* NAT module was loaded late. */
if (nf_ct_is_confirmed(ct))
return NF_ACCEPT;
nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
if (nat == NULL) {
pr_debug("failed to add NAT extension\n");
return NF_ACCEPT;
}
}
nat = nf_ct_nat_ext_add(ct);
if (nat == NULL)
return NF_ACCEPT;
switch (ctinfo) {
case IP_CT_RELATED:


@@ -34,7 +34,7 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
if (!err) {
ip_send_check(ip_hdr(skb));
skb->local_df = 1;
skb->ignore_df = 1;
}
return err;


@@ -48,15 +48,9 @@ static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
nat = nfct_nat(ct);
if (nat == NULL) {
/* Conntrack module was loaded late, can't add extension. */
if (nf_ct_is_confirmed(ct))
return NF_ACCEPT;
nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
if (nat == NULL)
return NF_ACCEPT;
}
nat = nf_ct_nat_ext_add(ct);
if (nat == NULL)
return NF_ACCEPT;
switch (ctinfo) {
case IP_CT_RELATED:
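
Both this hook and the nf_nat_ipv4_fn() hunk earlier in the diff now defer the late-extension handling to nf_ct_nat_ext_add(). Judging from the two removed open-coded blocks, the shared helper consolidates them roughly as:

static inline struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	/* NAT module was loaded late; extensions can only be added to
	 * conntracks that have not yet been confirmed.
	 */
	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}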


@@ -345,15 +345,15 @@ static void icmp_put(struct seq_file *seq)
for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu %lu",
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + icmpmibmap[i].index));
seq_printf(seq, " %lu %lu",
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
for (i = 0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
@@ -379,7 +379,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %llu",
snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
snmp_fold_field64(net->mib.ip_statistics,
snmp4_ipstats_list[i].entry,
offsetof(struct ipstats_mib, syncp)));
@@ -395,11 +395,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
/* MaxConn field is signed, RFC 2012 */
if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
seq_printf(seq, " %ld",
snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
snmp_fold_field(net->mib.tcp_statistics,
snmp4_tcp_list[i].entry));
else
seq_printf(seq, " %lu",
snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
snmp_fold_field(net->mib.tcp_statistics,
snmp4_tcp_list[i].entry));
}
@@ -410,7 +410,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nUdp:");
for (i = 0; snmp4_udp_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
snmp_fold_field((void __percpu **)net->mib.udp_statistics,
snmp_fold_field(net->mib.udp_statistics,
snmp4_udp_list[i].entry));
/* the UDP and UDP-Lite MIBs are the same */
@@ -421,7 +421,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nUdpLite:");
for (i = 0; snmp4_udp_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
snmp_fold_field(net->mib.udplite_statistics,
snmp4_udp_list[i].entry));
seq_putc(seq, '\n');
@@ -458,7 +458,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nTcpExt:");
for (i = 0; snmp4_net_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
snmp_fold_field((void __percpu **)net->mib.net_statistics,
snmp_fold_field(net->mib.net_statistics,
snmp4_net_list[i].entry));
seq_puts(seq, "\nIpExt:");
@@ -468,7 +468,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nIpExt:");
for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
seq_printf(seq, " %llu",
snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
snmp_fold_field64(net->mib.ip_statistics,
snmp4_ipextstats_list[i].entry,
offsetof(struct ipstats_mib, syncp)));
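
The churn above comes from snmp_fold_field() and snmp_fold_field64() dropping their (void __percpu **) double-pointer parameter now that each MIB is a single per-cpu allocation. The fold itself becomes a plain per-cpu walk; a sketch of the converted snmp_fold_field(), where offt indexes the unsigned long counters inside the per-cpu MIB block:

unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	/* Sum the counter at index 'offt' across every possible CPU. */
	for_each_possible_cpu(i)
		res += *(((unsigned long *)per_cpu_ptr(mib, i)) + offt);
	return res;
}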


@@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
ip_select_ident(skb, &rt->dst, NULL);
ip_select_ident(skb, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}


@@ -89,6 +89,7 @@
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
@@ -456,39 +457,19 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
return neigh_create(&arp_tbl, pkey, dev);
}
/*
* Peer allocation may fail only in serious out-of-memory conditions. However
* we still can generate some output.
* Random ID selection looks a bit dangerous because we have no chances to
* select ID being unique in a reasonable period of time.
* But broken packet identifier may be better than no packet at all.
*/
static void ip_select_fb_ident(struct iphdr *iph)
atomic_t *ip_idents __read_mostly;
EXPORT_SYMBOL(ip_idents);
void __ip_select_ident(struct iphdr *iph, int segs)
{
static DEFINE_SPINLOCK(ip_fb_id_lock);
static u32 ip_fallback_id;
u32 salt;
static u32 ip_idents_hashrnd __read_mostly;
u32 hash, id;
spin_lock_bh(&ip_fb_id_lock);
salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
iph->id = htons(salt & 0xFFFF);
ip_fallback_id = salt;
spin_unlock_bh(&ip_fb_id_lock);
}
net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
struct net *net = dev_net(dst->dev);
struct inet_peer *peer;
peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
if (peer) {
iph->id = htons(inet_getid(peer, more));
inet_putpeer(peer);
return;
}
ip_select_fb_ident(iph);
hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
@@ -993,6 +974,9 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
struct flowi4 fl4;
struct rtable *rt;
if (!mark)
mark = IP4_REPLY_MARK(net, skb->mark);
__build_flow_key(&fl4, NULL, iph, oif,
RT_TOS(iph->tos), protocol, mark, flow_flags);
rt = __ip_route_output_key(net, &fl4);
@@ -1010,6 +994,10 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
struct rtable *rt;
__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
if (!fl4.flowi4_mark)
fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
rt = __ip_route_output_key(sock_net(sk), &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
@@ -2704,6 +2692,12 @@ int __init ip_rt_init(void)
{
int rc = 0;
ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
if (!ip_idents)
panic("IP: failed to allocate ip_idents\n");
prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)
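
__ip_select_ident() above replaces the inetpeer lookup with a jhash of the destination into the new ip_idents array allocated in ip_rt_init(), bumping one bucket per GSO segment. The companion helper this series puts in include/net/ip.h is roughly:

#define IP_IDENTS_SZ 2048u
extern atomic_t *ip_idents;

static inline u32 ip_idents_reserve(u32 hash, int segs)
{
	atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;

	/* Reserve 'segs' consecutive IDs and return the first one. */
	return atomic_add_return(segs, id_ptr) - segs;
}

Flows that collide on a bucket merely advance each other's counter; since the counters start from prandom_bytes() and the hash secret comes from net_get_random_once(), IDs stay hard to guess without the per-destination inetpeer state.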


@@ -303,6 +303,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
ireq->ir_rmt_port = th->source;
ireq->ir_loc_addr = ip_hdr(skb)->daddr;
ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
ireq->ir_mark = inet_request_mark(sk, skb);
ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
@@ -339,7 +340,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
* hasn't changed since we received the original syn, but I see
* no easy way to do this.
*/
flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
inet_sk_flowi_flags(sk),
(opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
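
ireq->ir_mark ties syncookies into the new fwmark-reflection support visible in the ip_output.c and route.c hunks (IP4_REPLY_MARK). Reproduced from elsewhere in this series, the two helpers involved read approximately as follows:

/* include/net/ip.h */
#define IP4_REPLY_MARK(net, mark)				\
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

/* include/net/inet_sock.h */
static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
		return skb->mark;
	else
		return sk->sk_mark;
}

So a cookie-validated connection keeps the mark of the SYN that created it when tcp_fwmark_accept is enabled, and falls back to the listener's own mark otherwise.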

Some files were not shown because too many files have changed in this diff.