ef100_rep.c

// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"

#define EFX_EF100_REP_DRIVER	"efx_ef100_rep"

#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE	64

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);

static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
				     unsigned int i)
{
	efv->parent = efx;
	efv->idx = i;
	INIT_LIST_HEAD(&efv->list);
	efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efv->dflt.acts.list);
	INIT_LIST_HEAD(&efv->rx_list);
	spin_lock_init(&efv->rx_lock);
	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			  NETIF_MSG_TX_ERR | NETIF_MSG_HW;
	return 0;
}

static int efx_ef100_rep_open(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll);
	napi_enable(&efv->napi);
	return 0;
}

static int efx_ef100_rep_close(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	napi_disable(&efv->napi);
	netif_napi_del(&efv->napi);
	return 0;
}

static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	netdev_tx_t rc;

	/* __ef100_hard_start_xmit() will always return success even in the
	 * case of TX drops, where it will increment efx's tx_dropped.  The
	 * efv stats really only count attempted TX, not success/failure.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}

static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
					    struct netdev_phys_item_id *ppid)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;

	nic_data = efx->nic_data;
	/* nic_data->port_id is a u8[] */
	ppid->id_len = sizeof(nic_data->port_id);
	memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
	return 0;
}

static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
					    char *buf, size_t len)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;
	int ret;

	nic_data = efx->nic_data;
	ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
		       nic_data->pf_index, efv->idx);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
				  enum tc_setup_type type, void *type_data)
{
	struct efx_rep *efv = netdev_priv(net_dev);
	struct efx_nic *efx = efv->parent;

	if (type == TC_SETUP_CLSFLOWER)
		return efx_tc_flower(efx, net_dev, type_data, efv);
	if (type == TC_SETUP_BLOCK)
		return efx_tc_setup_block(net_dev, efx, type_data, efv);
	return -EOPNOTSUPP;
}

static void efx_ef100_rep_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	struct efx_rep *efv = netdev_priv(dev);

	stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
	stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
	stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
	stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
	stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
	stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}

const struct net_device_ops efx_ef100_rep_netdev_ops = {
	.ndo_open		= efx_ef100_rep_open,
	.ndo_stop		= efx_ef100_rep_close,
	.ndo_start_xmit		= efx_ef100_rep_xmit,
	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
	.ndo_get_stats64	= efx_ef100_rep_get_stats64,
	.ndo_setup_tc		= efx_ef100_rep_setup_tc,
};

static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}

static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	return efv->msg_enable;
}

static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
					       u32 msg_enable)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	efv->msg_enable = msg_enable;
}

static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
						struct ethtool_ringparam *ring,
						struct kernel_ethtool_ringparam *kring,
						struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	ring->rx_max_pending = U32_MAX;
	ring->rx_pending = efv->rx_pring_size;
}

static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
					       struct ethtool_ringparam *ring,
					       struct kernel_ethtool_ringparam *kring,
					       struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
		return -EINVAL;
	efv->rx_pring_size = ring->rx_pending;
	return 0;
}

static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
	.get_drvinfo		= efx_ef100_rep_get_drvinfo,
	.get_msglevel		= efx_ef100_rep_ethtool_get_msglevel,
	.set_msglevel		= efx_ef100_rep_ethtool_set_msglevel,
	.get_ringparam		= efx_ef100_rep_ethtool_get_ringparam,
	.set_ringparam		= efx_ef100_rep_ethtool_set_ringparam,
};

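/* Descriptive comment (not from the original source): allocate a
 * single-queue netdev for the representor, link it onto the parent PF's
 * vf_reps list, and hook up its netdev/ethtool ops.  The device starts
 * carrier-off with its queues stopped unless the parent is already up.
 */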
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
						   unsigned int i)
{
	struct net_device *net_dev;
	struct efx_rep *efv;
	int rc;

	net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
	if (!net_dev)
		return ERR_PTR(-ENOMEM);

	efv = netdev_priv(net_dev);
	rc = efx_ef100_rep_init_struct(efx, efv, i);
	if (rc)
		goto fail1;
	efv->net_dev = net_dev;
	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_add_tail(&efv->list, &efx->vf_reps);
	spin_unlock_bh(&efx->vf_reps_lock);
	if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
		netif_device_attach(net_dev);
		netif_carrier_on(net_dev);
	} else {
		netif_carrier_off(net_dev);
		netif_tx_stop_all_queues(net_dev);
	}
	rtnl_unlock();

	net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
	net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->features |= NETIF_F_LLTX;
	net_dev->hw_features |= NETIF_F_LLTX;
	return efv;
fail1:
	free_netdev(net_dev);
	return ERR_PTR(rc);
}

static int efx_ef100_configure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 selector;
	int rc;

	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Construct mport selector for corresponding VF */
	efx_mae_mport_vf(efx, efv->idx, &selector);
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits */
	WARN_ON(efv->mport >> 16);

	return efx_tc_configure_default_rule_rep(efv);
}

static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}

static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_del(&efv->list);
	spin_unlock_bh(&efx->vf_reps_lock);
	rtnl_unlock();
	synchronize_rcu();
	free_netdev(efv->net_dev);
}

int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
	struct efx_rep *efv;
	int rc;

	efv = efx_ef100_rep_create_netdev(efx, i);
	if (IS_ERR(efv)) {
		rc = PTR_ERR(efv);
		pci_err(efx->pci_dev,
			"Failed to create representor for VF %d, rc %d\n", i,
			rc);
		return rc;
	}
	rc = efx_ef100_configure_rep(efv);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to configure representor for VF %d, rc %d\n",
			i, rc);
		goto fail1;
	}
	rc = register_netdev(efv->net_dev);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to register representor for VF %d, rc %d\n",
			i, rc);
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
		efv->net_dev->name);
	return 0;
fail2:
	efx_ef100_deconfigure_rep(efv);
fail1:
	efx_ef100_rep_destroy_netdev(efv);
	return rc;
}

void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
	struct net_device *rep_dev;

	rep_dev = efv->net_dev;
	if (!rep_dev)
		return;
	netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
	unregister_netdev(rep_dev);
	efx_ef100_deconfigure_rep(efv);
	efx_ef100_rep_destroy_netdev(efv);
}

void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct efx_rep *efv, *next;

	if (!nic_data->grp_mae)
		return;

	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
		efx_ef100_vfrep_destroy(efx, efv);
}

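/* Descriptive comment (not from the original source): the representor has no
 * hardware RX queue of its own, so this NAPI poll just drains the software
 * rx_list that efx_ef100_rep_rx_packet() fills, up to 'weight' packets, and
 * re-primes the fake-interrupt via read_index/write_index.
 */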
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	unsigned int read_index;
	struct list_head head;
	struct sk_buff *skb;
	bool need_resched;
	int spent = 0;

	INIT_LIST_HEAD(&head);
	/* Grab up to 'weight' pending SKBs */
	spin_lock_bh(&efv->rx_lock);
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);
	/* Receive them */
	netif_receive_skb_list(&head);
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If write_index advanced while we were doing the
			 * RX, then storing our read_index won't re-prime the
			 * fake-interrupt.  In that case, we need to schedule
			 * NAPI again to consume the additional packet(s).
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}

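/* Descriptive comment (not from the original source): called from the parent
 * PF's RX path to deliver a packet to this representor.  The payload is
 * copied into a freshly allocated skb, queued on the pseudo RX ring, and the
 * representor's NAPI is kicked if the ring was previously empty.
 */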
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
	u8 *eh = efx_rx_buf_va(rx_buf);
	struct sk_buff *skb;
	bool primed;

	/* Don't allow too many queued SKBs to build up, as they consume
	 * GFP_ATOMIC memory.  If we overrun, just start dropping.
	 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}

	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);

	skb_record_rx_queue(skb, 0); /* rep is single-queue */

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efv->net_dev);

	skb_checksum_none_assert(skb);

	atomic64_inc(&efv->stats.rx_packets);
	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

	/* Add it to the rx list */
	spin_lock_bh(&efv->rx_lock);
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);
	/* Trigger rx work */
	if (primed)
		napi_schedule(&efv->napi);
}

struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
	struct efx_rep *efv, *out = NULL;

	/* spinlock guards against list mutation while we're walking it;
	 * but caller must also hold rcu_read_lock() to ensure the netdev
	 * isn't freed after we drop the spinlock.
	 */
	spin_lock_bh(&efx->vf_reps_lock);
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}