vport-internal_dev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 */

#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>

#include <net/dst.h>
#include <net/xfrm.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
struct internal_dev {
        struct vport *vport;
};

static struct vport_ops ovs_internal_vport_ops;

static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
        return netdev_priv(netdev);
}
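
/* Transmit path of the internal device: a packet sent out of this netdev is
 * handed straight to the datapath through ovs_vport_receive(), so a
 * "transmit" on the device is a "receive" for the switch.
 */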
/* Called with rcu_read_lock_bh. */
static netdev_tx_t
internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        int len, err;

        /* store len value because skb can be freed inside ovs_vport_receive() */
        len = skb->len;

        rcu_read_lock();
        err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
        rcu_read_unlock();

        if (likely(!err))
                dev_sw_netstats_tx_add(netdev, 1, len);
        else
                netdev->stats.tx_errors++;

        return NETDEV_TX_OK;
}
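
/* ndo_open/ndo_stop simply start and stop the device's transmit queue. */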
static int internal_dev_open(struct net_device *netdev)
{
        netif_start_queue(netdev);
        return 0;
}

static int internal_dev_stop(struct net_device *netdev)
{
        netif_stop_queue(netdev);
        return 0;
}
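
/* Minimal ethtool support: report the driver name and the link state. */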
static void internal_dev_getinfo(struct net_device *netdev,
                                 struct ethtool_drvinfo *info)
{
        strscpy(info->driver, "openvswitch", sizeof(info->driver));
}

static const struct ethtool_ops internal_dev_ethtool_ops = {
        .get_drvinfo    = internal_dev_getinfo,
        .get_link       = ethtool_op_get_link,
};
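
/* ->priv_destructor: runs when the net_device is released and frees the
 * vport that was allocated in internal_dev_create().
 */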
static void internal_dev_destructor(struct net_device *dev)
{
        struct vport *vport = ovs_internal_dev_get_vport(dev);

        ovs_vport_free(vport);
}

static const struct net_device_ops internal_dev_netdev_ops = {
        .ndo_open = internal_dev_open,
        .ndo_stop = internal_dev_stop,
        .ndo_start_xmit = internal_dev_xmit,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_get_stats64 = dev_get_tstats64,
};

static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
        .kind = "openvswitch",
};
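
/* alloc_netdev() setup callback: configures the port as an Ethernet device
 * with no qdisc, a random MAC address and software checksum/GSO offloads.
 */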
static void do_setup(struct net_device *netdev)
{
        ether_setup(netdev);
        netdev->max_mtu = ETH_MAX_MTU;

        netdev->netdev_ops = &internal_dev_netdev_ops;

        netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
                              IFF_NO_QUEUE;
        netdev->needs_free_netdev = true;
        netdev->priv_destructor = NULL;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->rtnl_link_ops = &internal_dev_link_ops;

        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
                           NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
                           NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;

        netdev->vlan_features = netdev->features;
        netdev->hw_enc_features = netdev->features;
        netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
        netdev->hw_features = netdev->features & ~NETIF_F_LLTX;

        eth_hw_addr_random(netdev);
}
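
/* Creates the vport and its backing net_device, allocates the per-CPU
 * software stats and registers the device under the RTNL lock. On failure the
 * partially constructed objects are unwound through the error labels.
 */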
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
        struct vport *vport;
        struct internal_dev *internal_dev;
        struct net_device *dev;
        int err;

        vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }

        dev = alloc_netdev(sizeof(struct internal_dev),
                           parms->name, NET_NAME_USER, do_setup);
        vport->dev = dev;
        if (!vport->dev) {
                err = -ENOMEM;
                goto error_free_vport;
        }
        vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!vport->dev->tstats) {
                err = -ENOMEM;
                goto error_free_netdev;
        }

        dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
        dev->ifindex = parms->desired_ifindex;
        internal_dev = internal_dev_priv(vport->dev);
        internal_dev->vport = vport;

        /* Restrict bridge port to current netns. */
        if (vport->port_no == OVSP_LOCAL)
                vport->dev->features |= NETIF_F_NETNS_LOCAL;

        rtnl_lock();
        err = register_netdevice(vport->dev);
        if (err)
                goto error_unlock;
        vport->dev->priv_destructor = internal_dev_destructor;

        dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
        netif_start_queue(vport->dev);

        return vport;

error_unlock:
        rtnl_unlock();
        free_percpu(dev->tstats);
error_free_netdev:
        free_netdev(dev);
error_free_vport:
        ovs_vport_free(vport);
error:
        return ERR_PTR(err);
}
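
/* Tears down an internal port: drops the promiscuity reference taken at
 * creation and unregisters the net_device. The vport itself is freed later by
 * internal_dev_destructor() once the device is released.
 */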
static void internal_dev_destroy(struct vport *vport)
{
        netif_stop_queue(vport->dev);
        rtnl_lock();
        dev_set_promiscuity(vport->dev, -1);

        /* unregister_netdevice() waits for an RCU grace period. */
        unregister_netdevice(vport->dev);
        free_percpu(vport->dev->tstats);

        rtnl_unlock();
}
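
/* Receive path of the internal device: the datapath delivers a packet to this
 * vport by injecting it into the local network stack. The skb is scrubbed of
 * dst, conntrack and xfrm state before being passed to netif_rx().
 */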
static int internal_dev_recv(struct sk_buff *skb)
{
        struct net_device *netdev = skb->dev;

        if (unlikely(!(netdev->flags & IFF_UP))) {
                kfree_skb(skb);
                netdev->stats.rx_dropped++;
                return NETDEV_TX_OK;
        }

        skb_dst_drop(skb);
        nf_reset_ct(skb);
        secpath_reset(skb);

        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, netdev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        dev_sw_netstats_rx_add(netdev, skb->len);

        netif_rx(skb);
        return NETDEV_TX_OK;
}
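
/* Note that .send points at internal_dev_recv(): sending on the vport means
 * delivering the packet to the local stack.
 */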
static struct vport_ops ovs_internal_vport_ops = {
        .type           = OVS_VPORT_TYPE_INTERNAL,
        .create         = internal_dev_create,
        .destroy        = internal_dev_destroy,
        .send           = internal_dev_recv,
};
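
/* Returns nonzero if @netdev is an OVS internal device, identified by its
 * netdev_ops pointer.
 */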
int ovs_is_internal_dev(const struct net_device *netdev)
{
        return netdev->netdev_ops == &internal_dev_netdev_ops;
}

struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
{
        if (!ovs_is_internal_dev(netdev))
                return NULL;

        return internal_dev_priv(netdev)->vport;
}
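
/* Registers the "openvswitch" rtnl link type and the internal vport ops; if
 * the vport ops registration fails, the link type registration is rolled back.
 */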
int ovs_internal_dev_rtnl_link_register(void)
{
        int err;

        err = rtnl_link_register(&internal_dev_link_ops);
        if (err < 0)
                return err;

        err = ovs_vport_ops_register(&ovs_internal_vport_ops);
        if (err < 0)
                rtnl_link_unregister(&internal_dev_link_ops);

        return err;
}

void ovs_internal_dev_rtnl_link_unregister(void)
{
        ovs_vport_ops_unregister(&ovs_internal_vport_ops);
        rtnl_link_unregister(&internal_dev_link_ops);
}