Merge tag 'linux-can-next-for-5.4-20190904' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next

Marc Kleine-Budde says:

====================
pull-request: can-next 2019-09-04 j1939

this is a pull request for net-next/master consisting of 21 patches.

the first 12 patches are by me and target the CAN core infrastructure.
They clean up the names of variables, structs and struct members,
convert can_rx_register() to use max() instead of open coding it, and
remove unneeded code from the can_pernet_exit() callback.

The next three patches are also by me; they introduce and make use of
the CAN midlayer private structure, which is used to hold
protocol-specific per-device data structures.
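
As a rough sketch, assuming only the two users visible in this series
(the af_can receive lists and the J1939 stack), the midlayer private
structure boils down to something like:

    /* hangs off dev->ml_priv for every CAN netdevice set up via alloc_candev() */
    struct can_ml_priv {
            struct can_dev_rcv_lists dev_rcv_lists; /* af_can receive filter lists */
            struct j1939_priv *j1939_priv;          /* owned by the J1939 stack, NULL otherwise */
    };

With this in place, af_can's can_dev_rcv_lists_find() and j1939's
j1939_priv_set()/j1939_ndev_to_priv() both go through the same struct
instead of each protocol claiming dev->ml_priv for itself.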

The next patch is by Oleksij Rempel; it switches
&net->can.rcvlists_lock from spin_lock() to spin_lock_bh(), so that
it can be used from NAPI (soft IRQ) context.
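
The rule followed here is general locking practice rather than
anything j1939 specific: once a lock can also be taken from softirq
context, every process-context acquisition has to disable bottom
halves, otherwise a softirq interrupting the lock holder on the same
CPU would spin on the lock forever. A minimal sketch:

    /* process context, e.g. can_rx_register() below */
    spin_lock_bh(&net->can.rcvlists_lock);  /* BHs off, no softirq can preempt us */
    /* ... add or remove a receiver ... */
    spin_unlock_bh(&net->can.rcvlists_lock);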

The next four patches are by Kurt Van Dijck; he first updates his
email address via mailmap and then extends sockaddr_can to include
j1939 members.
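
For orientation, the new members land in the can_addr union of struct
sockaddr_can roughly as follows (a sketch of the layout as used by the
J1939 socket code, not a verbatim copy of the uapi header):

    struct sockaddr_can {
            __kernel_sa_family_t can_family;
            int                  can_ifindex;
            union {
                    /* transport protocol class address info (e.g. ISO-TP) */
                    struct { canid_t rx_id, tx_id; } tp;
                    /* J1939 address info */
                    struct {
                            __u64 name;     /* 64-bit ECU NAME, J1939_NO_NAME if unused */
                            __u32 pgn;      /* Parameter Group Number */
                            __u8  addr;     /* 8-bit source address */
                    } j1939;
                    /* CAN_RAW and CAN_BCM only need can_ifindex */
            } can_addr;
    };

This is also why the bcm and raw diffs below replace the
sizeof(*addr) length checks with CAN_REQUIRED_SIZE(*addr, can_ifindex):
those protocols only look at can_ifindex, so an older, shorter
sockaddr_can that still covers that member remains valid.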

The final patch is the collective effort of many entities (the j1939
authors: Oliver Hartkopp, Bastian Stender, Elenita Hinds, kbuild test
robot, Kurt Van Dijck, Maxime Jayat, Robin van der Gracht, Oleksij
Rempel, Marc Kleine-Budde). It adds support for the SAE J1939 protocol
to the CAN networking stack.

SAE J1939 is the vehicle bus recommended practice used for communication
and diagnostics among vehicle components. Originating in the car and
heavy-duty truck industry in the United States, it is now widely used in
other parts of the world.
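
For user space, the new protocol is reachable through a normal PF_CAN
datagram socket. A hedged sketch (the interface name and the fixed
source address 0x20 are made up for illustration; the constants come
from the new <linux/can/j1939.h> uapi header):

    #include <linux/can.h>
    #include <linux/can/j1939.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int j1939_open(const char *ifname, __u8 src_addr)
    {
            struct sockaddr_can addr;
            int sock;

            sock = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
            if (sock < 0)
                    return -1;

            memset(&addr, 0, sizeof(addr));
            addr.can_family = AF_CAN;
            addr.can_ifindex = if_nametoindex(ifname);
            addr.can_addr.j1939.name = J1939_NO_NAME;   /* no dynamic address claiming */
            addr.can_addr.j1939.pgn = J1939_NO_PGN;     /* do not filter on PGN */
            addr.can_addr.j1939.addr = src_addr;        /* fixed source address */

            if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(sock);
                    return -1;
            }

            /* send()/recv() now carry only the J1939 payload bytes;
             * the addressing goes into / comes from the sockaddr.
             */
            return sock;
    }

    /* e.g.: int s = j1939_open("can0", 0x20); */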

P.S.: This pull request doesn't invalidate my last pull request:
      "pull-request: can-next 2019-09-03".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2019-09-05 12:17:50 +02:00.
28 changed files with 5398 additions and 298 deletions.

@@ -53,6 +53,8 @@ config CAN_GW
They can be modified with AND/OR/XOR/SET operations as configured
by the netlink configuration interface known e.g. from iptables.
source "net/can/j1939/Kconfig"
source "drivers/net/can/Kconfig"
endif

@@ -15,3 +15,5 @@ can-bcm-y := bcm.o
obj-$(CONFIG_CAN_GW) += can-gw.o
can-gw-y := gw.o
obj-$(CONFIG_CAN_J1939) += j1939/

@@ -58,6 +58,7 @@
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/can-ml.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -198,7 +199,7 @@ int can_send(struct sk_buff *skb, int loop)
{
struct sk_buff *newskb = NULL;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats;
struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
int err = -EINVAL;
if (skb->len == CAN_MTU) {
@@ -285,8 +286,8 @@ int can_send(struct sk_buff *skb, int loop)
netif_rx_ni(newskb);
/* update statistics */
can_stats->tx_frames++;
can_stats->tx_frames_delta++;
pkg_stats->tx_frames++;
pkg_stats->tx_frames_delta++;
return 0;
@@ -298,13 +299,15 @@ EXPORT_SYMBOL(can_send);
/* af_can rx path */
static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net,
struct net_device *dev)
static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
struct net_device *dev)
{
if (!dev)
return net->can.can_rx_alldev_list;
else
return (struct can_dev_rcv_lists *)dev->ml_priv;
if (dev) {
struct can_ml_priv *ml_priv = dev->ml_priv;
return &ml_priv->dev_rcv_lists;
} else {
return net->can.rx_alldev_list;
}
}
/**
@@ -331,7 +334,7 @@ static unsigned int effhash(canid_t can_id)
}
/**
* find_rcv_list - determine optimal filterlist inside device filter struct
* can_rcv_list_find - determine optimal filterlist inside device filter struct
* @can_id: pointer to CAN identifier of a given can_filter
* @mask: pointer to CAN mask of a given can_filter
* @d: pointer to the device filter struct
@@ -357,8 +360,8 @@ static unsigned int effhash(canid_t can_id)
* Constistency checked mask.
* Reduced can_id to have a preprocessed filter compare value.
*/
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
struct can_dev_rcv_lists *d)
static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
struct can_dev_rcv_lists *dev_rcv_lists)
{
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
@@ -366,7 +369,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
if (*mask & CAN_ERR_FLAG) {
/* clear CAN_ERR_FLAG in filter entry */
*mask &= CAN_ERR_MASK;
return &d->rx[RX_ERR];
return &dev_rcv_lists->rx[RX_ERR];
}
/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
@@ -382,26 +385,26 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
/* inverse can_id/can_mask filter */
if (inv)
return &d->rx[RX_INV];
return &dev_rcv_lists->rx[RX_INV];
/* mask == 0 => no condition testing at receive time */
if (!(*mask))
return &d->rx[RX_ALL];
return &dev_rcv_lists->rx[RX_ALL];
/* extra filterlists for the subscription of a single non-RTR can_id */
if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
!(*can_id & CAN_RTR_FLAG)) {
if (*can_id & CAN_EFF_FLAG) {
if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
return &d->rx_eff[effhash(*can_id)];
return &dev_rcv_lists->rx_eff[effhash(*can_id)];
} else {
if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
return &d->rx_sff[*can_id];
return &dev_rcv_lists->rx_sff[*can_id];
}
}
/* default: filter via can_id/can_mask */
return &d->rx[RX_FIL];
return &dev_rcv_lists->rx[RX_FIL];
}
/**
@@ -438,10 +441,10 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
canid_t mask, void (*func)(struct sk_buff *, void *),
void *data, char *ident, struct sock *sk)
{
struct receiver *r;
struct hlist_head *rl;
struct can_dev_rcv_lists *d;
struct s_pstats *can_pstats = net->can.can_pstats;
struct receiver *rcv;
struct hlist_head *rcv_list;
struct can_dev_rcv_lists *dev_rcv_lists;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
int err = 0;
/* insert new receiver (dev,canid,mask) -> (func,data) */
@@ -452,36 +455,30 @@ int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
if (dev && !net_eq(net, dev_net(dev)))
return -ENODEV;
r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
if (!r)
rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
if (!rcv)
return -ENOMEM;
spin_lock(&net->can.can_rcvlists_lock);
spin_lock_bh(&net->can.rcvlists_lock);
d = find_dev_rcv_lists(net, dev);
if (d) {
rl = find_rcv_list(&can_id, &mask, d);
dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
r->can_id = can_id;
r->mask = mask;
r->matches = 0;
r->func = func;
r->data = data;
r->ident = ident;
r->sk = sk;
rcv->can_id = can_id;
rcv->mask = mask;
rcv->matches = 0;
rcv->func = func;
rcv->data = data;
rcv->ident = ident;
rcv->sk = sk;
hlist_add_head_rcu(&r->list, rl);
d->entries++;
hlist_add_head_rcu(&rcv->list, rcv_list);
dev_rcv_lists->entries++;
can_pstats->rcv_entries++;
if (can_pstats->rcv_entries_max < can_pstats->rcv_entries)
can_pstats->rcv_entries_max = can_pstats->rcv_entries;
} else {
kmem_cache_free(rcv_cache, r);
err = -ENODEV;
}
spin_unlock(&net->can.can_rcvlists_lock);
rcv_lists_stats->rcv_entries++;
rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max,
rcv_lists_stats->rcv_entries);
spin_unlock_bh(&net->can.rcvlists_lock);
return err;
}
@@ -490,10 +487,10 @@ EXPORT_SYMBOL(can_rx_register);
/* can_rx_delete_receiver - rcu callback for single receiver entry removal */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
struct receiver *r = container_of(rp, struct receiver, rcu);
struct sock *sk = r->sk;
struct receiver *rcv = container_of(rp, struct receiver, rcu);
struct sock *sk = rcv->sk;
kmem_cache_free(rcv_cache, r);
kmem_cache_free(rcv_cache, rcv);
if (sk)
sock_put(sk);
}
@@ -513,10 +510,10 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
canid_t mask, void (*func)(struct sk_buff *, void *),
void *data)
{
struct receiver *r = NULL;
struct hlist_head *rl;
struct s_pstats *can_pstats = net->can.can_pstats;
struct can_dev_rcv_lists *d;
struct receiver *rcv = NULL;
struct hlist_head *rcv_list;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
struct can_dev_rcv_lists *dev_rcv_lists;
if (dev && dev->type != ARPHRD_CAN)
return;
@@ -524,83 +521,69 @@ void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
if (dev && !net_eq(net, dev_net(dev)))
return;
spin_lock(&net->can.can_rcvlists_lock);
spin_lock_bh(&net->can.rcvlists_lock);
d = find_dev_rcv_lists(net, dev);
if (!d) {
pr_err("BUG: receive list not found for dev %s, id %03X, mask %03X\n",
DNAME(dev), can_id, mask);
goto out;
}
rl = find_rcv_list(&can_id, &mask, d);
dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
/* Search the receiver list for the item to delete. This should
* exist, since no receiver may be unregistered that hasn't
* been registered before.
*/
hlist_for_each_entry_rcu(r, rl, list) {
if (r->can_id == can_id && r->mask == mask &&
r->func == func && r->data == data)
hlist_for_each_entry_rcu(rcv, rcv_list, list) {
if (rcv->can_id == can_id && rcv->mask == mask &&
rcv->func == func && rcv->data == data)
break;
}
/* Check for bugs in CAN protocol implementations using af_can.c:
* 'r' will be NULL if no matching list item was found for removal.
* 'rcv' will be NULL if no matching list item was found for removal.
*/
if (!r) {
if (!rcv) {
WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n",
DNAME(dev), can_id, mask);
goto out;
}
hlist_del_rcu(&r->list);
d->entries--;
hlist_del_rcu(&rcv->list);
dev_rcv_lists->entries--;
if (can_pstats->rcv_entries > 0)
can_pstats->rcv_entries--;
/* remove device structure requested by NETDEV_UNREGISTER */
if (d->remove_on_zero_entries && !d->entries) {
kfree(d);
dev->ml_priv = NULL;
}
if (rcv_lists_stats->rcv_entries > 0)
rcv_lists_stats->rcv_entries--;
out:
spin_unlock(&net->can.can_rcvlists_lock);
spin_unlock_bh(&net->can.rcvlists_lock);
/* schedule the receiver item for deletion */
if (r) {
if (r->sk)
sock_hold(r->sk);
call_rcu(&r->rcu, can_rx_delete_receiver);
if (rcv) {
if (rcv->sk)
sock_hold(rcv->sk);
call_rcu(&rcv->rcu, can_rx_delete_receiver);
}
}
EXPORT_SYMBOL(can_rx_unregister);
static inline void deliver(struct sk_buff *skb, struct receiver *r)
static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
{
r->func(skb, r->data);
r->matches++;
rcv->func(skb, rcv->data);
rcv->matches++;
}
static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
{
struct receiver *r;
struct receiver *rcv;
int matches = 0;
struct can_frame *cf = (struct can_frame *)skb->data;
canid_t can_id = cf->can_id;
if (d->entries == 0)
if (dev_rcv_lists->entries == 0)
return 0;
if (can_id & CAN_ERR_FLAG) {
/* check for error message frame entries only */
hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
if (can_id & r->mask) {
deliver(skb, r);
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) {
if (can_id & rcv->mask) {
deliver(skb, rcv);
matches++;
}
}
@@ -608,23 +591,23 @@ static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
}
/* check for unfiltered entries */
hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
deliver(skb, r);
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) {
deliver(skb, rcv);
matches++;
}
/* check for can_id/mask entries */
hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
if ((can_id & r->mask) == r->can_id) {
deliver(skb, r);
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) {
if ((can_id & rcv->mask) == rcv->can_id) {
deliver(skb, rcv);
matches++;
}
}
/* check for inverted can_id/mask entries */
hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
if ((can_id & r->mask) != r->can_id) {
deliver(skb, r);
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) {
if ((can_id & rcv->mask) != rcv->can_id) {
deliver(skb, rcv);
matches++;
}
}
@@ -634,16 +617,16 @@ static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
return matches;
if (can_id & CAN_EFF_FLAG) {
hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
if (r->can_id == can_id) {
deliver(skb, r);
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) {
if (rcv->can_id == can_id) {
deliver(skb, rcv);
matches++;
}
}
} else {
can_id &= CAN_SFF_MASK;
hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
deliver(skb, r);
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) {
deliver(skb, rcv);
matches++;
}
}
@@ -653,14 +636,14 @@ static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
struct can_dev_rcv_lists *d;
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = dev_net(dev);
struct s_stats *can_stats = net->can.can_stats;
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
int matches;
/* update statistics */
can_stats->rx_frames++;
can_stats->rx_frames_delta++;
pkg_stats->rx_frames++;
pkg_stats->rx_frames_delta++;
/* create non-zero unique skb identifier together with *skb */
while (!(can_skb_prv(skb)->skbcnt))
@@ -669,12 +652,11 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
/* deliver the packet to sockets listening on all devices */
matches = can_rcv_filter(net->can.can_rx_alldev_list, skb);
matches = can_rcv_filter(net->can.rx_alldev_list, skb);
/* find receive list for this device */
d = find_dev_rcv_lists(net, dev);
if (d)
matches += can_rcv_filter(d, skb);
dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
matches += can_rcv_filter(dev_rcv_lists, skb);
rcu_read_unlock();
@@ -682,8 +664,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
consume_skb(skb);
if (matches > 0) {
can_stats->matches++;
can_stats->matches_delta++;
pkg_stats->matches++;
pkg_stats->matches_delta++;
}
}
@@ -789,41 +771,14 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct can_dev_rcv_lists *d;
if (dev->type != ARPHRD_CAN)
return NOTIFY_DONE;
switch (msg) {
case NETDEV_REGISTER:
/* create new dev_rcv_lists for this device */
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return NOTIFY_DONE;
BUG_ON(dev->ml_priv);
dev->ml_priv = d;
break;
case NETDEV_UNREGISTER:
spin_lock(&dev_net(dev)->can.can_rcvlists_lock);
d = dev->ml_priv;
if (d) {
if (d->entries) {
d->remove_on_zero_entries = 1;
} else {
kfree(d);
dev->ml_priv = NULL;
}
} else {
pr_err("can: notifier: receive list not found for dev %s\n",
dev->name);
}
spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);
WARN(!dev->ml_priv,
"No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
break;
}
@@ -832,66 +787,51 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
static int can_pernet_init(struct net *net)
{
spin_lock_init(&net->can.can_rcvlists_lock);
net->can.can_rx_alldev_list =
kzalloc(sizeof(*net->can.can_rx_alldev_list), GFP_KERNEL);
if (!net->can.can_rx_alldev_list)
spin_lock_init(&net->can.rcvlists_lock);
net->can.rx_alldev_list =
kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL);
if (!net->can.rx_alldev_list)
goto out;
net->can.can_stats = kzalloc(sizeof(*net->can.can_stats), GFP_KERNEL);
if (!net->can.can_stats)
goto out_free_alldev_list;
net->can.can_pstats = kzalloc(sizeof(*net->can.can_pstats), GFP_KERNEL);
if (!net->can.can_pstats)
goto out_free_can_stats;
net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL);
if (!net->can.pkg_stats)
goto out_free_rx_alldev_list;
net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL);
if (!net->can.rcv_lists_stats)
goto out_free_pkg_stats;
if (IS_ENABLED(CONFIG_PROC_FS)) {
/* the statistics are updated every second (timer triggered) */
if (stats_timer) {
timer_setup(&net->can.can_stattimer, can_stat_update,
timer_setup(&net->can.stattimer, can_stat_update,
0);
mod_timer(&net->can.can_stattimer,
mod_timer(&net->can.stattimer,
round_jiffies(jiffies + HZ));
}
net->can.can_stats->jiffies_init = jiffies;
net->can.pkg_stats->jiffies_init = jiffies;
can_init_proc(net);
}
return 0;
out_free_can_stats:
kfree(net->can.can_stats);
out_free_alldev_list:
kfree(net->can.can_rx_alldev_list);
out_free_pkg_stats:
kfree(net->can.pkg_stats);
out_free_rx_alldev_list:
kfree(net->can.rx_alldev_list);
out:
return -ENOMEM;
}
static void can_pernet_exit(struct net *net)
{
struct net_device *dev;
if (IS_ENABLED(CONFIG_PROC_FS)) {
can_remove_proc(net);
if (stats_timer)
del_timer_sync(&net->can.can_stattimer);
del_timer_sync(&net->can.stattimer);
}
/* remove created dev_rcv_lists from still registered CAN devices */
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (dev->type == ARPHRD_CAN && dev->ml_priv) {
struct can_dev_rcv_lists *d = dev->ml_priv;
BUG_ON(d->entries);
kfree(d);
dev->ml_priv = NULL;
}
}
rcu_read_unlock();
kfree(net->can.can_rx_alldev_list);
kfree(net->can.can_stats);
kfree(net->can.can_pstats);
kfree(net->can.rx_alldev_list);
kfree(net->can.pkg_stats);
kfree(net->can.rcv_lists_stats);
}
/* af_can module init/exit functions */

@@ -60,25 +60,10 @@ struct receiver {
struct rcu_head rcu;
};
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
/* per device receive filters linked at dev->ml_priv */
struct can_dev_rcv_lists {
struct hlist_head rx[RX_MAX];
struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
int remove_on_zero_entries;
int entries;
};
/* statistic structures */
/* can be reset e.g. by can_init_stats() */
struct s_stats {
struct can_pkg_stats {
unsigned long jiffies_init;
unsigned long rx_frames;
@@ -103,7 +88,7 @@ struct s_stats {
};
/* persistent statistics */
struct s_pstats {
struct can_rcv_lists_stats {
unsigned long stats_reset;
unsigned long user_reset;
unsigned long rcv_entries;

@@ -1294,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
/* no bound device as default => check msg_name */
DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
if (msg->msg_namelen < sizeof(*addr))
if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
return -EINVAL;
if (addr->can_family != AF_CAN)
@@ -1536,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
struct net *net = sock_net(sk);
int ret = 0;
if (len < sizeof(*addr))
if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
return -EINVAL;
lock_sock(sk);

net/can/j1939/Kconfig (new file):

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
#
# SAE J1939 network layer core configuration
#
config CAN_J1939
tristate "SAE J1939"
depends on CAN
help
SAE J1939
Say Y to have in-kernel support for j1939 socket type. This
allows communication according to SAE j1939.
The relevant parts in kernel are
SAE j1939-21 (datalink & transport protocol)
& SAE j1939-81 (network management).

net/can/j1939/Makefile (new file):

@@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CAN_J1939) += can-j1939.o
can-j1939-objs := \
address-claim.o \
bus.o \
main.o \
socket.o \
transport.o

net/can/j1939/address-claim.c (new file):

@@ -0,0 +1,230 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2010-2011 EIA Electronics,
// Pieter Beyens <pieter.beyens@eia.be>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
/* J1939 Address Claiming.
* Address Claiming in the kernel
* - keeps track of the AC states of ECU's,
* - resolves NAME<=>SA taking into account the AC states of ECU's.
*
* All Address Claim msgs (including host-originated msg) are processed
* at the receive path (a sent msg is always received again via CAN echo).
* As such, the processing of AC msgs is done in the order on which msgs
* are sent on the bus.
*
* This module doesn't send msgs itself (e.g. replies on Address Claims),
* this is the responsibility of a user space application or daemon.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "j1939-priv.h"
static inline name_t j1939_skb_to_name(const struct sk_buff *skb)
{
return le64_to_cpup((__le64 *)skb->data);
}
static inline bool j1939_ac_msg_is_request(struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
int req_pgn;
if (skb->len < 3 || skcb->addr.pgn != J1939_PGN_REQUEST)
return false;
req_pgn = skb->data[0] | (skb->data[1] << 8) | (skb->data[2] << 16);
return req_pgn == J1939_PGN_ADDRESS_CLAIMED;
}
static int j1939_ac_verify_outgoing(struct j1939_priv *priv,
struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
if (skb->len != 8) {
netdev_notice(priv->ndev, "tx address claim with dlc %i\n",
skb->len);
return -EPROTO;
}
if (skcb->addr.src_name != j1939_skb_to_name(skb)) {
netdev_notice(priv->ndev, "tx address claim with different name\n");
return -EPROTO;
}
if (skcb->addr.sa == J1939_NO_ADDR) {
netdev_notice(priv->ndev, "tx address claim with broadcast sa\n");
return -EPROTO;
}
/* ac must always be a broadcast */
if (skcb->addr.dst_name || skcb->addr.da != J1939_NO_ADDR) {
netdev_notice(priv->ndev, "tx address claim with dest, not broadcast\n");
return -EPROTO;
}
return 0;
}
int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
int ret;
u8 addr;
/* network mgmt: address claiming msgs */
if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) {
struct j1939_ecu *ecu;
ret = j1939_ac_verify_outgoing(priv, skb);
/* return both when failure & when successful */
if (ret < 0)
return ret;
ecu = j1939_ecu_get_by_name(priv, skcb->addr.src_name);
if (!ecu)
return -ENODEV;
if (ecu->addr != skcb->addr.sa)
/* hold further traffic for ecu, remove from parent */
j1939_ecu_unmap(ecu);
j1939_ecu_put(ecu);
} else if (skcb->addr.src_name) {
/* assign source address */
addr = j1939_name_to_addr(priv, skcb->addr.src_name);
if (!j1939_address_is_unicast(addr) &&
!j1939_ac_msg_is_request(skb)) {
netdev_notice(priv->ndev, "tx drop: invalid sa for name 0x%016llx\n",
skcb->addr.src_name);
return -EADDRNOTAVAIL;
}
skcb->addr.sa = addr;
}
/* assign destination address */
if (skcb->addr.dst_name) {
addr = j1939_name_to_addr(priv, skcb->addr.dst_name);
if (!j1939_address_is_unicast(addr)) {
netdev_notice(priv->ndev, "tx drop: invalid da for name 0x%016llx\n",
skcb->addr.dst_name);
return -EADDRNOTAVAIL;
}
skcb->addr.da = addr;
}
return 0;
}
static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_ecu *ecu, *prev;
name_t name;
if (skb->len != 8) {
netdev_notice(priv->ndev, "rx address claim with wrong dlc %i\n",
skb->len);
return;
}
name = j1939_skb_to_name(skb);
skcb->addr.src_name = name;
if (!name) {
netdev_notice(priv->ndev, "rx address claim without name\n");
return;
}
if (!j1939_address_is_valid(skcb->addr.sa)) {
netdev_notice(priv->ndev, "rx address claim with broadcast sa\n");
return;
}
write_lock_bh(&priv->lock);
/* Few words on the ECU ref counting:
*
* First we get an ECU handle, either with
* j1939_ecu_get_by_name_locked() (increments the ref counter)
* or j1939_ecu_create_locked() (initializes an ECU object
* with a ref counter of 1).
*
* j1939_ecu_unmap_locked() will decrement the ref counter,
* but only if the ECU was mapped before. So "ecu" still
* belongs to us.
*
* j1939_ecu_timer_start() will increment the ref counter
* before it starts the timer, so we can put the ecu when
* leaving this function.
*/
ecu = j1939_ecu_get_by_name_locked(priv, name);
if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
ecu = j1939_ecu_create_locked(priv, name);
if (IS_ERR_OR_NULL(ecu))
goto out_unlock_bh;
/* cancel pending (previous) address claim */
j1939_ecu_timer_cancel(ecu);
if (j1939_address_is_idle(skcb->addr.sa)) {
j1939_ecu_unmap_locked(ecu);
goto out_ecu_put;
}
/* save new addr */
if (ecu->addr != skcb->addr.sa)
j1939_ecu_unmap_locked(ecu);
ecu->addr = skcb->addr.sa;
prev = j1939_ecu_get_by_addr_locked(priv, skcb->addr.sa);
if (prev) {
if (ecu->name > prev->name) {
j1939_ecu_unmap_locked(ecu);
j1939_ecu_put(prev);
goto out_ecu_put;
} else {
/* kick prev if less or equal */
j1939_ecu_unmap_locked(prev);
j1939_ecu_put(prev);
}
}
j1939_ecu_timer_start(ecu);
out_ecu_put:
j1939_ecu_put(ecu);
out_unlock_bh:
write_unlock_bh(&priv->lock);
}
void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_ecu *ecu;
/* network mgmt */
if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) {
j1939_ac_process(priv, skb);
} else if (j1939_address_is_unicast(skcb->addr.sa)) {
/* assign source name */
ecu = j1939_ecu_get_by_addr(priv, skcb->addr.sa);
if (ecu) {
skcb->addr.src_name = ecu->name;
j1939_ecu_put(ecu);
}
}
/* assign destination name */
ecu = j1939_ecu_get_by_addr(priv, skcb->addr.da);
if (ecu) {
skcb->addr.dst_name = ecu->name;
j1939_ecu_put(ecu);
}
}

net/can/j1939/bus.c (new file):

@@ -0,0 +1,333 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
/* bus for j1939 remote devices
* Since rtnetlink, no real bus is used.
*/
#include <net/sock.h>
#include "j1939-priv.h"
static void __j1939_ecu_release(struct kref *kref)
{
struct j1939_ecu *ecu = container_of(kref, struct j1939_ecu, kref);
struct j1939_priv *priv = ecu->priv;
list_del(&ecu->list);
kfree(ecu);
j1939_priv_put(priv);
}
void j1939_ecu_put(struct j1939_ecu *ecu)
{
kref_put(&ecu->kref, __j1939_ecu_release);
}
static void j1939_ecu_get(struct j1939_ecu *ecu)
{
kref_get(&ecu->kref);
}
static bool j1939_ecu_is_mapped_locked(struct j1939_ecu *ecu)
{
struct j1939_priv *priv = ecu->priv;
lockdep_assert_held(&priv->lock);
return j1939_ecu_find_by_addr_locked(priv, ecu->addr) == ecu;
}
/* ECU device interface */
/* map ECU to a bus address space */
static void j1939_ecu_map_locked(struct j1939_ecu *ecu)
{
struct j1939_priv *priv = ecu->priv;
struct j1939_addr_ent *ent;
lockdep_assert_held(&priv->lock);
if (!j1939_address_is_unicast(ecu->addr))
return;
ent = &priv->ents[ecu->addr];
if (ent->ecu) {
netdev_warn(priv->ndev, "Trying to map already mapped ECU, addr: 0x%02x, name: 0x%016llx. Skip it.\n",
ecu->addr, ecu->name);
return;
}
j1939_ecu_get(ecu);
ent->ecu = ecu;
ent->nusers += ecu->nusers;
}
/* unmap ECU from a bus address space */
void j1939_ecu_unmap_locked(struct j1939_ecu *ecu)
{
struct j1939_priv *priv = ecu->priv;
struct j1939_addr_ent *ent;
lockdep_assert_held(&priv->lock);
if (!j1939_address_is_unicast(ecu->addr))
return;
if (!j1939_ecu_is_mapped_locked(ecu))
return;
ent = &priv->ents[ecu->addr];
ent->ecu = NULL;
ent->nusers -= ecu->nusers;
j1939_ecu_put(ecu);
}
void j1939_ecu_unmap(struct j1939_ecu *ecu)
{
write_lock_bh(&ecu->priv->lock);
j1939_ecu_unmap_locked(ecu);
write_unlock_bh(&ecu->priv->lock);
}
void j1939_ecu_unmap_all(struct j1939_priv *priv)
{
int i;
write_lock_bh(&priv->lock);
for (i = 0; i < ARRAY_SIZE(priv->ents); i++)
if (priv->ents[i].ecu)
j1939_ecu_unmap_locked(priv->ents[i].ecu);
write_unlock_bh(&priv->lock);
}
void j1939_ecu_timer_start(struct j1939_ecu *ecu)
{
/* The ECU is held here and released in the
* j1939_ecu_timer_handler() or j1939_ecu_timer_cancel().
*/
j1939_ecu_get(ecu);
/* Schedule timer in 250 msec to commit address change. */
hrtimer_start(&ecu->ac_timer, ms_to_ktime(250),
HRTIMER_MODE_REL_SOFT);
}
void j1939_ecu_timer_cancel(struct j1939_ecu *ecu)
{
if (hrtimer_cancel(&ecu->ac_timer))
j1939_ecu_put(ecu);
}
static enum hrtimer_restart j1939_ecu_timer_handler(struct hrtimer *hrtimer)
{
struct j1939_ecu *ecu =
container_of(hrtimer, struct j1939_ecu, ac_timer);
struct j1939_priv *priv = ecu->priv;
write_lock_bh(&priv->lock);
/* TODO: can we test if ecu->addr is unicast before starting
* the timer?
*/
j1939_ecu_map_locked(ecu);
/* The corresponding j1939_ecu_get() is in
* j1939_ecu_timer_start().
*/
j1939_ecu_put(ecu);
write_unlock_bh(&priv->lock);
return HRTIMER_NORESTART;
}
struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
ecu = kzalloc(sizeof(*ecu), gfp_any());
if (!ecu)
return ERR_PTR(-ENOMEM);
kref_init(&ecu->kref);
ecu->addr = J1939_IDLE_ADDR;
ecu->name = name;
hrtimer_init(&ecu->ac_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ecu->ac_timer.function = j1939_ecu_timer_handler;
INIT_LIST_HEAD(&ecu->list);
j1939_priv_get(priv);
ecu->priv = priv;
list_add_tail(&ecu->list, &priv->ecus);
return ecu;
}
struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv,
u8 addr)
{
lockdep_assert_held(&priv->lock);
return priv->ents[addr].ecu;
}
struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv, u8 addr)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
if (!j1939_address_is_unicast(addr))
return NULL;
ecu = j1939_ecu_find_by_addr_locked(priv, addr);
if (ecu)
j1939_ecu_get(ecu);
return ecu;
}
struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr)
{
struct j1939_ecu *ecu;
read_lock_bh(&priv->lock);
ecu = j1939_ecu_get_by_addr_locked(priv, addr);
read_unlock_bh(&priv->lock);
return ecu;
}
/* get pointer to ecu without increasing ref counter */
static struct j1939_ecu *j1939_ecu_find_by_name_locked(struct j1939_priv *priv,
name_t name)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
list_for_each_entry(ecu, &priv->ecus, list) {
if (ecu->name == name)
return ecu;
}
return NULL;
}
struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv,
name_t name)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
if (!name)
return NULL;
ecu = j1939_ecu_find_by_name_locked(priv, name);
if (ecu)
j1939_ecu_get(ecu);
return ecu;
}
struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name)
{
struct j1939_ecu *ecu;
read_lock_bh(&priv->lock);
ecu = j1939_ecu_get_by_name_locked(priv, name);
read_unlock_bh(&priv->lock);
return ecu;
}
u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name)
{
struct j1939_ecu *ecu;
int addr = J1939_IDLE_ADDR;
if (!name)
return J1939_NO_ADDR;
read_lock_bh(&priv->lock);
ecu = j1939_ecu_find_by_name_locked(priv, name);
if (ecu && j1939_ecu_is_mapped_locked(ecu))
/* ecu's SA is registered */
addr = ecu->addr;
read_unlock_bh(&priv->lock);
return addr;
}
/* TX addr/name accounting
* Transport protocol needs to know if a SA is local or not
* These functions originate from userspace manipulating sockets,
* so locking is straigforward
*/
int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
{
struct j1939_ecu *ecu;
int err = 0;
write_lock_bh(&priv->lock);
if (j1939_address_is_unicast(sa))
priv->ents[sa].nusers++;
if (!name)
goto done;
ecu = j1939_ecu_get_by_name_locked(priv, name);
if (!ecu)
ecu = j1939_ecu_create_locked(priv, name);
err = PTR_ERR_OR_ZERO(ecu);
if (err)
goto done;
ecu->nusers++;
/* TODO: do we care if ecu->addr != sa? */
if (j1939_ecu_is_mapped_locked(ecu))
/* ecu's sa is active already */
priv->ents[ecu->addr].nusers++;
done:
write_unlock_bh(&priv->lock);
return err;
}
void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa)
{
struct j1939_ecu *ecu;
write_lock_bh(&priv->lock);
if (j1939_address_is_unicast(sa))
priv->ents[sa].nusers--;
if (!name)
goto done;
ecu = j1939_ecu_find_by_name_locked(priv, name);
if (WARN_ON_ONCE(!ecu))
goto done;
ecu->nusers--;
/* TODO: do we care if ecu->addr != sa? */
if (j1939_ecu_is_mapped_locked(ecu))
/* ecu's sa is active already */
priv->ents[ecu->addr].nusers--;
j1939_ecu_put(ecu);
done:
write_unlock_bh(&priv->lock);
}

net/can/j1939/j1939-priv.h (new file):

@@ -0,0 +1,338 @@
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
#ifndef _J1939_PRIV_H_
#define _J1939_PRIV_H_
#include <linux/can/j1939.h>
#include <net/sock.h>
/* Timeout to receive the abort signal over loop back. In case CAN
* bus is open, the timeout should be triggered.
*/
#define J1939_XTP_ABORT_TIMEOUT_MS 500
#define J1939_SIMPLE_ECHO_TIMEOUT_MS (10 * 1000)
struct j1939_session;
enum j1939_sk_errqueue_type {
J1939_ERRQUEUE_ACK,
J1939_ERRQUEUE_SCHED,
J1939_ERRQUEUE_ABORT,
};
/* j1939 devices */
struct j1939_ecu {
struct list_head list;
name_t name;
u8 addr;
/* indicates that this ecu successfully claimed @sa as its address */
struct hrtimer ac_timer;
struct kref kref;
struct j1939_priv *priv;
/* count users, to help transport protocol decide for interaction */
int nusers;
};
struct j1939_priv {
struct list_head ecus;
/* local list entry in priv
* These allow irq (& softirq) context lookups on j1939 devices
* This approach (separate lists) is done as the other 2 alternatives
* are not easier or even wrong
* 1) using the pure kobject methods involves mutexes, which are not
* allowed in irq context.
* 2) duplicating data structures would require a lot of synchronization
* code
* usage:
*/
/* segments need a lock to protect the above list */
rwlock_t lock;
struct net_device *ndev;
/* list of 256 ecu ptrs, that cache the claimed addresses.
* also protected by the above lock
*/
struct j1939_addr_ent {
struct j1939_ecu *ecu;
/* count users, to help transport protocol */
int nusers;
} ents[256];
struct kref kref;
/* List of active sessions to prevent start of conflicting
* one.
*
* Do not start two sessions of same type, addresses and
* direction.
*/
struct list_head active_session_list;
/* protects active_session_list */
spinlock_t active_session_list_lock;
unsigned int tp_max_packet_size;
/* lock for j1939_socks list */
spinlock_t j1939_socks_lock;
struct list_head j1939_socks;
struct kref rx_kref;
};
void j1939_ecu_put(struct j1939_ecu *ecu);
/* keep the cache of what is local */
int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa);
void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa);
static inline bool j1939_address_is_unicast(u8 addr)
{
return addr <= J1939_MAX_UNICAST_ADDR;
}
static inline bool j1939_address_is_idle(u8 addr)
{
return addr == J1939_IDLE_ADDR;
}
static inline bool j1939_address_is_valid(u8 addr)
{
return addr != J1939_NO_ADDR;
}
static inline bool j1939_pgn_is_pdu1(pgn_t pgn)
{
/* ignore dp & res bits for this */
return (pgn & 0xff00) < 0xf000;
}
/* utility to correctly unmap an ECU */
void j1939_ecu_unmap_locked(struct j1939_ecu *ecu);
void j1939_ecu_unmap(struct j1939_ecu *ecu);
u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name);
struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv,
u8 addr);
struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr);
struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv,
u8 addr);
struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name);
struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv,
name_t name);
enum j1939_transfer_type {
J1939_TP,
J1939_ETP,
J1939_SIMPLE,
};
struct j1939_addr {
name_t src_name;
name_t dst_name;
pgn_t pgn;
u8 sa;
u8 da;
u8 type;
};
/* control buffer of the sk_buff */
struct j1939_sk_buff_cb {
/* Offset in bytes within one ETP session */
u32 offset;
/* for tx, MSG_SYN will be used to sync on sockets */
u32 msg_flags;
u32 tskey;
struct j1939_addr addr;
/* Flags for quick lookups during skb processing.
* These are set in the receive path only.
*/
#define J1939_ECU_LOCAL_SRC BIT(0)
#define J1939_ECU_LOCAL_DST BIT(1)
u8 flags;
priority_t priority;
};
static inline
struct j1939_sk_buff_cb *j1939_skb_to_cb(const struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct j1939_sk_buff_cb) > sizeof(skb->cb));
return (struct j1939_sk_buff_cb *)skb->cb;
}
int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb);
void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb);
bool j1939_sk_recv_match(struct j1939_priv *priv,
struct j1939_sk_buff_cb *skcb);
void j1939_sk_send_loop_abort(struct sock *sk, int err);
void j1939_sk_errqueue(struct j1939_session *session,
enum j1939_sk_errqueue_type type);
void j1939_sk_queue_activate_next(struct j1939_session *session);
/* stack entries */
struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
struct sk_buff *skb, size_t size);
int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb);
int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb);
void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb);
void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb);
/* network management */
struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name);
void j1939_ecu_timer_start(struct j1939_ecu *ecu);
void j1939_ecu_timer_cancel(struct j1939_ecu *ecu);
void j1939_ecu_unmap_all(struct j1939_priv *priv);
struct j1939_priv *j1939_netdev_start(struct net_device *ndev);
void j1939_netdev_stop(struct j1939_priv *priv);
void j1939_priv_put(struct j1939_priv *priv);
void j1939_priv_get(struct j1939_priv *priv);
/* notify/alert all j1939 sockets bound to ifindex */
void j1939_sk_netdev_event_netdown(struct j1939_priv *priv);
int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk);
void j1939_tp_init(struct j1939_priv *priv);
/* decrement pending skb for a j1939 socket */
void j1939_sock_pending_del(struct sock *sk);
enum j1939_session_state {
J1939_SESSION_NEW,
J1939_SESSION_ACTIVE,
/* waiting for abort signal on the bus */
J1939_SESSION_WAITING_ABORT,
J1939_SESSION_ACTIVE_MAX,
J1939_SESSION_DONE,
};
struct j1939_session {
struct j1939_priv *priv;
struct list_head active_session_list_entry;
struct list_head sk_session_queue_entry;
struct kref kref;
struct sock *sk;
/* ifindex, src, dst, pgn define the session block
* the are _never_ modified after insertion in the list
* this decreases locking problems a _lot_
*/
struct j1939_sk_buff_cb skcb;
struct sk_buff_head skb_queue;
/* all tx related stuff (last_txcmd, pkt.tx)
* is protected (modified only) with the txtimer hrtimer
* 'total' & 'block' are never changed,
* last_cmd, last & block are protected by ->lock
* this means that the tx may run after cts is received that should
* have stopped tx, but this time discrepancy is never avoided anyhow
*/
u8 last_cmd, last_txcmd;
bool transmission;
bool extd;
/* Total message size, number of bytes */
unsigned int total_message_size;
/* Total number of bytes queue from socket to the session */
unsigned int total_queued_size;
unsigned int tx_retry;
int err;
u32 tskey;
enum j1939_session_state state;
/* Packets counters for a (extended) transfer session. The packet is
* maximal of 7 bytes.
*/
struct {
/* total - total number of packets for this session */
unsigned int total;
/* last - last packet of a transfer block after which
* responder should send ETP.CM_CTS and originator
* ETP.CM_DPO
*/
unsigned int last;
/* tx - number of packets send by originator node.
* this counter can be set back if responder node
* didn't received all packets send by originator.
*/
unsigned int tx;
unsigned int tx_acked;
/* rx - number of packets received */
unsigned int rx;
/* block - amount of packets expected in one block */
unsigned int block;
/* dpo - ETP.CM_DPO, Data Packet Offset */
unsigned int dpo;
} pkt;
struct hrtimer txtimer, rxtimer;
};
struct j1939_sock {
struct sock sk; /* must be first to skip with memset */
struct j1939_priv *priv;
struct list_head list;
#define J1939_SOCK_BOUND BIT(0)
#define J1939_SOCK_CONNECTED BIT(1)
#define J1939_SOCK_PROMISC BIT(2)
#define J1939_SOCK_ERRQUEUE BIT(3)
int state;
int ifindex;
struct j1939_addr addr;
struct j1939_filter *filters;
int nfilters;
pgn_t pgn_rx_filter;
/* j1939 may emit equal PGN (!= equal CAN-id's) out of order
* when transport protocol comes in.
* To allow emitting in order, keep a 'pending' nr. of packets
*/
atomic_t skb_pending;
wait_queue_head_t waitq;
/* lock for the sk_session_queue list */
spinlock_t sk_session_queue_lock;
struct list_head sk_session_queue;
};
static inline struct j1939_sock *j1939_sk(const struct sock *sk)
{
return container_of(sk, struct j1939_sock, sk);
}
void j1939_session_get(struct j1939_session *session);
void j1939_session_put(struct j1939_session *session);
void j1939_session_skb_queue(struct j1939_session *session,
struct sk_buff *skb);
int j1939_session_activate(struct j1939_session *session);
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
void j1939_session_timers_cancel(struct j1939_session *session);
#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
#define J1939_REGULAR 0
#define J1939_EXTENDED 1
/* CAN protocol */
extern const struct can_proto j1939_can_proto;
#endif /* _J1939_PRIV_H_ */

net/can/j1939/main.c (new file):

@@ -0,0 +1,403 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Pieter Beyens <pieter.beyens@eia.be>
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2018 Protonic,
// Robin van der Gracht <robin@protonic.nl>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
/* Core of can-j1939 that links j1939 to CAN. */
#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include "j1939-priv.h"
MODULE_DESCRIPTION("PF_CAN SAE J1939");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("EIA Electronics (Kurt Van Dijck & Pieter Beyens)");
MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
/* LOWLEVEL CAN interface */
/* CAN_HDR: #bytes before can_frame data part */
#define J1939_CAN_HDR (offsetof(struct can_frame, data))
/* CAN_FTR: #bytes beyond data part */
#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
sizeof(((struct can_frame *)0)->data))
/* lowest layer */
static void j1939_can_recv(struct sk_buff *iskb, void *data)
{
struct j1939_priv *priv = data;
struct sk_buff *skb;
struct j1939_sk_buff_cb *skcb, *iskcb;
struct can_frame *cf;
/* create a copy of the skb
* j1939 only delivers the real data bytes,
* the header goes into sockaddr.
* j1939 may not touch the incoming skb in such way
*/
skb = skb_clone(iskb, GFP_ATOMIC);
if (!skb)
return;
can_skb_set_owner(skb, iskb->sk);
/* get a pointer to the header of the skb
* the skb payload (pointer) is moved, so that the next skb_data
* returns the actual payload
*/
cf = (void *)skb->data;
skb_pull(skb, J1939_CAN_HDR);
/* fix length, set to dlc, with 8 maximum */
skb_trim(skb, min_t(uint8_t, cf->can_dlc, 8));
/* set addr */
skcb = j1939_skb_to_cb(skb);
memset(skcb, 0, sizeof(*skcb));
iskcb = j1939_skb_to_cb(iskb);
skcb->tskey = iskcb->tskey;
skcb->priority = (cf->can_id >> 26) & 0x7;
skcb->addr.sa = cf->can_id;
skcb->addr.pgn = (cf->can_id >> 8) & J1939_PGN_MAX;
/* set default message type */
skcb->addr.type = J1939_TP;
if (j1939_pgn_is_pdu1(skcb->addr.pgn)) {
/* Type 1: with destination address */
skcb->addr.da = skcb->addr.pgn;
/* normalize pgn: strip dst address */
skcb->addr.pgn &= 0x3ff00;
} else {
/* set broadcast address */
skcb->addr.da = J1939_NO_ADDR;
}
/* update localflags */
read_lock_bh(&priv->lock);
if (j1939_address_is_unicast(skcb->addr.sa) &&
priv->ents[skcb->addr.sa].nusers)
skcb->flags |= J1939_ECU_LOCAL_SRC;
if (j1939_address_is_unicast(skcb->addr.da) &&
priv->ents[skcb->addr.da].nusers)
skcb->flags |= J1939_ECU_LOCAL_DST;
read_unlock_bh(&priv->lock);
/* deliver into the j1939 stack ... */
j1939_ac_recv(priv, skb);
if (j1939_tp_recv(priv, skb))
/* this means the transport layer processed the message */
goto done;
j1939_simple_recv(priv, skb);
j1939_sk_recv(priv, skb);
done:
kfree_skb(skb);
}
/* NETDEV MANAGEMENT */
/* values for can_rx_(un)register */
#define J1939_CAN_ID CAN_EFF_FLAG
#define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
static DEFINE_SPINLOCK(j1939_netdev_lock);
static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
{
struct j1939_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
rwlock_init(&priv->lock);
INIT_LIST_HEAD(&priv->ecus);
priv->ndev = ndev;
kref_init(&priv->kref);
kref_init(&priv->rx_kref);
dev_hold(ndev);
netdev_dbg(priv->ndev, "%s : 0x%p\n", __func__, priv);
return priv;
}
static inline void j1939_priv_set(struct net_device *ndev,
struct j1939_priv *priv)
{
struct can_ml_priv *can_ml_priv = ndev->ml_priv;
can_ml_priv->j1939_priv = priv;
}
static void __j1939_priv_release(struct kref *kref)
{
struct j1939_priv *priv = container_of(kref, struct j1939_priv, kref);
struct net_device *ndev = priv->ndev;
netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv);
dev_put(ndev);
kfree(priv);
}
void j1939_priv_put(struct j1939_priv *priv)
{
kref_put(&priv->kref, __j1939_priv_release);
}
void j1939_priv_get(struct j1939_priv *priv)
{
kref_get(&priv->kref);
}
static int j1939_can_rx_register(struct j1939_priv *priv)
{
struct net_device *ndev = priv->ndev;
int ret;
j1939_priv_get(priv);
ret = can_rx_register(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
j1939_can_recv, priv, "j1939", NULL);
if (ret < 0) {
j1939_priv_put(priv);
return ret;
}
return 0;
}
static void j1939_can_rx_unregister(struct j1939_priv *priv)
{
struct net_device *ndev = priv->ndev;
can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
j1939_can_recv, priv);
j1939_priv_put(priv);
}
static void __j1939_rx_release(struct kref *kref)
__releases(&j1939_netdev_lock)
{
struct j1939_priv *priv = container_of(kref, struct j1939_priv,
rx_kref);
j1939_can_rx_unregister(priv);
j1939_ecu_unmap_all(priv);
j1939_priv_set(priv->ndev, NULL);
spin_unlock(&j1939_netdev_lock);
}
/* get pointer to priv without increasing ref counter */
static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
{
struct can_ml_priv *can_ml_priv = ndev->ml_priv;
return can_ml_priv->j1939_priv;
}
static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
{
struct j1939_priv *priv;
lockdep_assert_held(&j1939_netdev_lock);
if (ndev->type != ARPHRD_CAN)
return NULL;
priv = j1939_ndev_to_priv(ndev);
if (priv)
j1939_priv_get(priv);
return priv;
}
static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
{
struct j1939_priv *priv;
spin_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
spin_unlock(&j1939_netdev_lock);
return priv;
}
struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
{
struct j1939_priv *priv, *priv_new;
int ret;
priv = j1939_priv_get_by_ndev(ndev);
if (priv) {
kref_get(&priv->rx_kref);
return priv;
}
priv = j1939_priv_create(ndev);
if (!priv)
return ERR_PTR(-ENOMEM);
j1939_tp_init(priv);
spin_lock_init(&priv->j1939_socks_lock);
INIT_LIST_HEAD(&priv->j1939_socks);
spin_lock(&j1939_netdev_lock);
priv_new = j1939_priv_get_by_ndev_locked(ndev);
if (priv_new) {
/* Someone was faster than us, use their priv and roll
* back our's.
*/
spin_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
kref_get(&priv_new->rx_kref);
return priv_new;
}
j1939_priv_set(ndev, priv);
spin_unlock(&j1939_netdev_lock);
ret = j1939_can_rx_register(priv);
if (ret < 0)
goto out_priv_put;
return priv;
out_priv_put:
j1939_priv_set(ndev, NULL);
dev_put(ndev);
kfree(priv);
return ERR_PTR(ret);
}
void j1939_netdev_stop(struct j1939_priv *priv)
{
kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
j1939_priv_put(priv);
}
int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
{
int ret, dlc;
canid_t canid;
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct can_frame *cf;
/* apply sanity checks */
if (j1939_pgn_is_pdu1(skcb->addr.pgn))
skcb->addr.pgn &= J1939_PGN_PDU1_MAX;
else
skcb->addr.pgn &= J1939_PGN_MAX;
if (skcb->priority > 7)
skcb->priority = 6;
ret = j1939_ac_fixup(priv, skb);
if (unlikely(ret))
goto failed;
dlc = skb->len;
/* re-claim the CAN_HDR from the SKB */
cf = skb_push(skb, J1939_CAN_HDR);
/* make it a full can frame again */
skb_put(skb, J1939_CAN_FTR + (8 - dlc));
canid = CAN_EFF_FLAG |
(skcb->priority << 26) |
(skcb->addr.pgn << 8) |
skcb->addr.sa;
if (j1939_pgn_is_pdu1(skcb->addr.pgn))
canid |= skcb->addr.da << 8;
cf->can_id = canid;
cf->can_dlc = dlc;
return can_send(skb, 1);
failed:
kfree_skb(skb);
return ret;
}
static int j1939_netdev_notify(struct notifier_block *nb,
unsigned long msg, void *data)
{
struct net_device *ndev = netdev_notifier_info_to_dev(data);
struct j1939_priv *priv;
priv = j1939_priv_get_by_ndev(ndev);
if (!priv)
goto notify_done;
if (ndev->type != ARPHRD_CAN)
goto notify_put;
switch (msg) {
case NETDEV_DOWN:
j1939_cancel_active_session(priv, NULL);
j1939_sk_netdev_event_netdown(priv);
j1939_ecu_unmap_all(priv);
break;
}
notify_put:
j1939_priv_put(priv);
notify_done:
return NOTIFY_DONE;
}
static struct notifier_block j1939_netdev_notifier = {
.notifier_call = j1939_netdev_notify,
};
/* MODULE interface */
static __init int j1939_module_init(void)
{
int ret;
pr_info("can: SAE J1939\n");
ret = register_netdevice_notifier(&j1939_netdev_notifier);
if (ret)
goto fail_notifier;
ret = can_proto_register(&j1939_can_proto);
if (ret < 0) {
pr_err("can: registration of j1939 protocol failed\n");
goto fail_sk;
}
return 0;
fail_sk:
unregister_netdevice_notifier(&j1939_netdev_notifier);
fail_notifier:
return ret;
}
static __exit void j1939_module_exit(void)
{
can_proto_unregister(&j1939_can_proto);
unregister_netdevice_notifier(&j1939_netdev_notifier);
}
module_init(j1939_module_init);
module_exit(j1939_module_exit);

net/can/j1939/socket.c (new file, 1160 lines): diff not shown here because it is too large.

net/can/j1939/transport.c (new file, 2027 lines): diff not shown here because it is too large.

@@ -45,6 +45,7 @@
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include "af_can.h"
@@ -78,21 +79,21 @@ static const char rx_list_name[][8] = {
static void can_init_stats(struct net *net)
{
struct s_stats *can_stats = net->can.can_stats;
struct s_pstats *can_pstats = net->can.can_pstats;
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
/*
* This memset function is called from a timer context (when
* can_stattimer is active which is the default) OR in a process
* context (reading the proc_fs when can_stattimer is disabled).
*/
memset(can_stats, 0, sizeof(struct s_stats));
can_stats->jiffies_init = jiffies;
memset(pkg_stats, 0, sizeof(struct can_pkg_stats));
pkg_stats->jiffies_init = jiffies;
can_pstats->stats_reset++;
rcv_lists_stats->stats_reset++;
if (user_reset) {
user_reset = 0;
can_pstats->user_reset++;
rcv_lists_stats->user_reset++;
}
}
@@ -118,8 +119,8 @@ static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif,
void can_stat_update(struct timer_list *t)
{
struct net *net = from_timer(net, t, can.can_stattimer);
struct s_stats *can_stats = net->can.can_stats;
struct net *net = from_timer(net, t, can.stattimer);
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
unsigned long j = jiffies; /* snapshot */
/* restart counting in timer context on user request */
@@ -127,57 +128,57 @@ void can_stat_update(struct timer_list *t)
can_init_stats(net);
/* restart counting on jiffies overflow */
if (j < can_stats->jiffies_init)
if (j < pkg_stats->jiffies_init)
can_init_stats(net);
/* prevent overflow in calc_rate() */
if (can_stats->rx_frames > (ULONG_MAX / HZ))
if (pkg_stats->rx_frames > (ULONG_MAX / HZ))
can_init_stats(net);
/* prevent overflow in calc_rate() */
if (can_stats->tx_frames > (ULONG_MAX / HZ))
if (pkg_stats->tx_frames > (ULONG_MAX / HZ))
can_init_stats(net);
/* matches overflow - very improbable */
if (can_stats->matches > (ULONG_MAX / 100))
if (pkg_stats->matches > (ULONG_MAX / 100))
can_init_stats(net);
/* calc total values */
if (can_stats->rx_frames)
can_stats->total_rx_match_ratio = (can_stats->matches * 100) /
can_stats->rx_frames;
if (pkg_stats->rx_frames)
pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) /
pkg_stats->rx_frames;
can_stats->total_tx_rate = calc_rate(can_stats->jiffies_init, j,
can_stats->tx_frames);
can_stats->total_rx_rate = calc_rate(can_stats->jiffies_init, j,
can_stats->rx_frames);
pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j,
pkg_stats->tx_frames);
pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j,
pkg_stats->rx_frames);
/* calc current values */
if (can_stats->rx_frames_delta)
can_stats->current_rx_match_ratio =
(can_stats->matches_delta * 100) /
can_stats->rx_frames_delta;
if (pkg_stats->rx_frames_delta)
pkg_stats->current_rx_match_ratio =
(pkg_stats->matches_delta * 100) /
pkg_stats->rx_frames_delta;
can_stats->current_tx_rate = calc_rate(0, HZ, can_stats->tx_frames_delta);
can_stats->current_rx_rate = calc_rate(0, HZ, can_stats->rx_frames_delta);
pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta);
pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta);
/* check / update maximum values */
if (can_stats->max_tx_rate < can_stats->current_tx_rate)
can_stats->max_tx_rate = can_stats->current_tx_rate;
if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate)
pkg_stats->max_tx_rate = pkg_stats->current_tx_rate;
if (can_stats->max_rx_rate < can_stats->current_rx_rate)
can_stats->max_rx_rate = can_stats->current_rx_rate;
if (pkg_stats->max_rx_rate < pkg_stats->current_rx_rate)
pkg_stats->max_rx_rate = pkg_stats->current_rx_rate;
if (can_stats->max_rx_match_ratio < can_stats->current_rx_match_ratio)
can_stats->max_rx_match_ratio = can_stats->current_rx_match_ratio;
if (pkg_stats->max_rx_match_ratio < pkg_stats->current_rx_match_ratio)
pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
/* clear values for 'current rate' calculation */
can_stats->tx_frames_delta = 0;
can_stats->rx_frames_delta = 0;
can_stats->matches_delta = 0;
pkg_stats->tx_frames_delta = 0;
pkg_stats->rx_frames_delta = 0;
pkg_stats->matches_delta = 0;
/* restart timer (one second) */
mod_timer(&net->can.can_stattimer, round_jiffies(jiffies + HZ));
mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ));
}
/*
@@ -212,60 +213,60 @@ static void can_print_recv_banner(struct seq_file *m)
static int can_stats_proc_show(struct seq_file *m, void *v)
{
struct net *net = m->private;
struct s_stats *can_stats = net->can.can_stats;
struct s_pstats *can_pstats = net->can.can_pstats;
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
seq_putc(m, '\n');
seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats->tx_frames);
seq_printf(m, " %8ld received frames (RXF)\n", can_stats->rx_frames);
seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats->matches);
seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames);
seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames);
seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches);
seq_putc(m, '\n');
if (net->can.can_stattimer.function == can_stat_update) {
if (net->can.stattimer.function == can_stat_update) {
seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
can_stats->total_rx_match_ratio);
pkg_stats->total_rx_match_ratio);
seq_printf(m, " %8ld frames/s total tx rate (TXR)\n",
can_stats->total_tx_rate);
pkg_stats->total_tx_rate);
seq_printf(m, " %8ld frames/s total rx rate (RXR)\n",
can_stats->total_rx_rate);
pkg_stats->total_rx_rate);
seq_putc(m, '\n');
seq_printf(m, " %8ld %% current match ratio (CRXMR)\n",
can_stats->current_rx_match_ratio);
pkg_stats->current_rx_match_ratio);
seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n",
can_stats->current_tx_rate);
pkg_stats->current_tx_rate);
seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n",
can_stats->current_rx_rate);
pkg_stats->current_rx_rate);
seq_putc(m, '\n');
seq_printf(m, " %8ld %% max match ratio (MRXMR)\n",
can_stats->max_rx_match_ratio);
pkg_stats->max_rx_match_ratio);
seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n",
can_stats->max_tx_rate);
pkg_stats->max_tx_rate);
seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n",
can_stats->max_rx_rate);
pkg_stats->max_rx_rate);
seq_putc(m, '\n');
}
seq_printf(m, " %8ld current receive list entries (CRCV)\n",
can_pstats->rcv_entries);
rcv_lists_stats->rcv_entries);
seq_printf(m, " %8ld maximum receive list entries (MRCV)\n",
can_pstats->rcv_entries_max);
rcv_lists_stats->rcv_entries_max);
if (can_pstats->stats_reset)
if (rcv_lists_stats->stats_reset)
seq_printf(m, "\n %8ld statistic resets (STR)\n",
can_pstats->stats_reset);
rcv_lists_stats->stats_reset);
if (can_pstats->user_reset)
if (rcv_lists_stats->user_reset)
seq_printf(m, " %8ld user statistic resets (USTR)\n",
can_pstats->user_reset);
rcv_lists_stats->user_reset);
seq_putc(m, '\n');
return 0;
@@ -274,20 +275,20 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
static int can_reset_stats_proc_show(struct seq_file *m, void *v)
{
struct net *net = m->private;
struct s_pstats *can_pstats = net->can.can_pstats;
struct s_stats *can_stats = net->can.can_stats;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
user_reset = 1;
if (net->can.can_stattimer.function == can_stat_update) {
if (net->can.stattimer.function == can_stat_update) {
seq_printf(m, "Scheduled statistic reset #%ld.\n",
can_pstats->stats_reset + 1);
rcv_lists_stats->stats_reset + 1);
} else {
if (can_stats->jiffies_init != jiffies)
if (pkg_stats->jiffies_init != jiffies)
can_init_stats(net);
seq_printf(m, "Performed statistic reset #%ld.\n",
can_pstats->stats_reset);
rcv_lists_stats->stats_reset);
}
return 0;
}
@@ -300,11 +301,11 @@ static int can_version_proc_show(struct seq_file *m, void *v)
static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
struct net_device *dev,
struct can_dev_rcv_lists *d)
struct can_dev_rcv_lists *dev_rcv_lists)
{
if (!hlist_empty(&d->rx[idx])) {
if (!hlist_empty(&dev_rcv_lists->rx[idx])) {
can_print_recv_banner(m);
can_print_rcvlist(m, &d->rx[idx], dev);
can_print_rcvlist(m, &dev_rcv_lists->rx[idx], dev);
} else
seq_printf(m, " (%s: no entry)\n", DNAME(dev));
@@ -315,7 +316,7 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
/* double cast to prevent GCC warning */
int idx = (int)(long)PDE_DATA(m->file->f_inode);
struct net_device *dev;
struct can_dev_rcv_lists *d;
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = m->private;
seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
@@ -323,8 +324,8 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
rcu_read_lock();
/* receive list for 'all' CAN devices (dev == NULL) */
d = net->can.can_rx_alldev_list;
can_rcvlist_proc_show_one(m, idx, NULL, d);
dev_rcv_lists = net->can.rx_alldev_list;
can_rcvlist_proc_show_one(m, idx, NULL, dev_rcv_lists);
/* receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
@@ -366,7 +367,7 @@ static inline void can_rcvlist_proc_show_array(struct seq_file *m,
static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev;
struct can_dev_rcv_lists *d;
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = m->private;
/* RX_SFF */
@@ -375,15 +376,16 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
rcu_read_lock();
/* sff receive list for 'all' CAN devices (dev == NULL) */
d = net->can.can_rx_alldev_list;
can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff));
dev_rcv_lists = net->can.rx_alldev_list;
can_rcvlist_proc_show_array(m, NULL, dev_rcv_lists->rx_sff,
ARRAY_SIZE(dev_rcv_lists->rx_sff));
/* sff receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
if (dev->type == ARPHRD_CAN && dev->ml_priv) {
d = dev->ml_priv;
can_rcvlist_proc_show_array(m, dev, d->rx_sff,
ARRAY_SIZE(d->rx_sff));
dev_rcv_lists = dev->ml_priv;
can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff,
ARRAY_SIZE(dev_rcv_lists->rx_sff));
}
}
@@ -396,7 +398,7 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev;
struct can_dev_rcv_lists *d;
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = m->private;
/* RX_EFF */
@@ -405,15 +407,16 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
rcu_read_lock();
/* eff receive list for 'all' CAN devices (dev == NULL) */
d = net->can.can_rx_alldev_list;
can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff));
dev_rcv_lists = net->can.rx_alldev_list;
can_rcvlist_proc_show_array(m, NULL, dev_rcv_lists->rx_eff,
ARRAY_SIZE(dev_rcv_lists->rx_eff));
/* eff receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
if (dev->type == ARPHRD_CAN && dev->ml_priv) {
d = dev->ml_priv;
can_rcvlist_proc_show_array(m, dev, d->rx_eff,
ARRAY_SIZE(d->rx_eff));
dev_rcv_lists = dev->ml_priv;
can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff,
ARRAY_SIZE(dev_rcv_lists->rx_eff));
}
}

@@ -396,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
int err = 0;
int notify_enetdown = 0;
if (len < sizeof(*addr))
if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
return -EINVAL;
if (addr->can_family != AF_CAN)
return -EINVAL;
@@ -733,7 +733,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
if (msg->msg_namelen < sizeof(*addr))
if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
return -EINVAL;
if (addr->can_family != AF_CAN)