// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <[email protected]>
 */
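
/* Typical usage with the ip(8) tool (example only; "peer_ns" is a
 * placeholder namespace name and the interface names can be chosen freely,
 * assuming an iproute2 build with vxcan support):
 *
 *   ip link add vxcan0 type vxcan peer name vxcan1
 *   ip link set vxcan1 netns peer_ns
 *   ip link set vxcan0 up
 *   ip netns exec peer_ns ip link set vxcan1 up
 *
 * CAN frames written to one end are then delivered on the peer interface
 * in the other network namespace.
 */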

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <[email protected]>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
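
/* Per-device private data: each end of the tunnel holds an RCU-protected
 * pointer to its peer device (NULL once the peer has been deleted).
 */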
struct vxcan_priv {
	struct net_device __rcu *peer;
};
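
/* Transmit path: a frame sent on one end of the pair is cloned and handed
 * to the networking core via netif_rx() as if it had been received on the
 * peer device. Statistics are updated on both sides; if no peer is
 * attached the frame is dropped and counted as a TX drop.
 */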
static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;

	if (can_dropped_invalid_skb(dev, oskb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(oskb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb_tx_timestamp(oskb);

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (skb) {
		consume_skb(oskb);
	} else {
		kfree_skb(oskb);
		goto out_unlock;
	}

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type = PACKET_BROADCAST;
	skb->dev = peer;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	len = can_skb_get_data_len(skb);
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
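
/* Bring the interface up. Opening fails with -ENOTCONN while no peer is
 * attached; carrier is only signalled on both devices once both ends of
 * the pair are up.
 */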
static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}
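
/* Bring the interface down and drop carrier on both ends of the pair. */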
static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}
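
/* Report the peer's ifindex as this device's link, or 0 if no peer exists. */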
static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}
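
/* Only the fixed CAN frame sizes are valid MTUs: Classical CAN, CAN FD and
 * CAN XL. The MTU cannot be changed while the interface is up.
 */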
static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
	    !can_is_canxl_dev_mtu(new_mtu))
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open = vxcan_open,
	.ndo_stop = vxcan_close,
	.ndo_start_xmit = vxcan_xmit,
	.ndo_get_iflink = vxcan_get_iflink,
	.ndo_change_mtu = vxcan_change_mtu,
};

static const struct ethtool_ops vxcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};
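
/* Basic device setup for both ends of the pair: CAN device type, CAN FD
 * capable default MTU, no ARP, no TX queue, and the CAN mid-layer private
 * area placed directly behind the vxcan private data (matching the
 * .priv_size calculation in vxcan_link_ops below).
 */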
static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type = ARPHRD_CAN;
	dev->mtu = CANFD_MTU;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags = IFF_NOARP;
	dev->netdev_ops = &vxcan_netdev_ops;
	dev->ethtool_ops = &vxcan_ethtool_ops;
	dev->needs_free_netdev = true;

	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
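
/* rtnl_link_ops->newlink(): create both ends of the tunnel in one step.
 * The optional VXCAN_INFO_PEER attribute carries ifinfomsg/IFLA attributes
 * for the peer device, which may be placed in a different network
 * namespace. The peer is registered first, then the requesting device,
 * and finally the two private peer pointers are cross-linked.
 */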
static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}
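
/* rtnl_link_ops->dellink(): deleting one end also unregisters the peer.
 * The peer pointers are cleared first; readers under RCU may still see
 * the old peer until a grace period has elapsed (see the note below).
 */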
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};
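
/* Report the network namespace of the peer device (falling back to our
 * own namespace when no peer exists), so that the ifindex returned by
 * vxcan_get_iflink() can be interpreted in the right namespace.
 */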
static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}
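
/* The per-netdev private area holds struct vxcan_priv followed (aligned to
 * NETDEV_ALIGN) by the CAN mid-layer private data that vxcan_setup()
 * registers via can_set_ml_priv().
 */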
static struct rtnl_link_ops vxcan_link_ops = {
	.kind = DRV_NAME,
	.priv_size = ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup = vxcan_setup,
	.newlink = vxcan_newlink,
	.dellink = vxcan_dellink,
	.policy = vxcan_policy,
	.maxtype = VXCAN_INFO_MAX,
	.get_link_net = vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);