master.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a master device, switching frames via its switch fabric CPU port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <[email protected]>
 */

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>

#include "dsa_priv.h"

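/* ethtool ->get_regs_len() override for the master: reserve room for the
 * master's own register dump, plus an ethtool_drvinfo header, an ethtool_regs
 * header and the CPU port register dump provided by the switch driver.
 */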
static int dsa_master_get_regs_len(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int ret = 0;
	int len;

	if (ops->get_regs_len) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return len;
		ret += len;
	}

	ret += sizeof(struct ethtool_drvinfo);
	ret += sizeof(struct ethtool_regs);

	if (ds->ops->get_regs_len) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return len;
		ret += len;
	}

	return ret;
}

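/* ethtool ->get_regs() override: dump the master's own registers first, then
 * an ethtool_drvinfo marker tagged "dsa", an ethtool_regs header for the CPU
 * port, and finally the CPU port registers from the switch driver.
 */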
static void dsa_master_get_regs(struct net_device *dev,
				struct ethtool_regs *regs, void *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_drvinfo *cpu_info;
	struct ethtool_regs *cpu_regs;
	int port = cpu_dp->index;
	int len;

	if (ops->get_regs_len && ops->get_regs) {
		len = ops->get_regs_len(dev);
		if (len < 0)
			return;
		regs->len = len;
		ops->get_regs(dev, regs, data);
		data += regs->len;
	}

	cpu_info = (struct ethtool_drvinfo *)data;
	strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver));
	data += sizeof(*cpu_info);
	cpu_regs = (struct ethtool_regs *)data;
	data += sizeof(*cpu_regs);

	if (ds->ops->get_regs_len && ds->ops->get_regs) {
		len = ds->ops->get_regs_len(ds, port);
		if (len < 0)
			return;
		cpu_regs->len = len;
		ds->ops->get_regs(ds, port, cpu_regs, data);
	}
}

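/* Report the master's own statistics first, with the CPU port's switch
 * counters appended right after them in the data array.
 */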
static void dsa_master_get_ethtool_stats(struct net_device *dev,
					 struct ethtool_stats *stats,
					 uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (ops->get_sset_count && ops->get_ethtool_stats) {
		count = ops->get_sset_count(dev, ETH_SS_STATS);
		ops->get_ethtool_stats(dev, stats, data);
	}

	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, port, data + count);
}

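/* Same layout for PHY statistics: the master's (or its attached PHY's)
 * counters come first, followed by the CPU port PHY counters from the switch
 * driver.
 */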
static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
					     struct ethtool_stats *stats,
					     uint64_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int count = 0;

	if (dev->phydev && !ops->get_ethtool_phy_stats) {
		count = phy_ethtool_get_sset_count(dev->phydev);
		if (count >= 0)
			phy_ethtool_get_stats(dev->phydev, stats, data);
	} else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
		count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
		ops->get_ethtool_phy_stats(dev, stats, data);
	}

	if (count < 0)
		count = 0;

	if (ds->ops->get_ethtool_phy_stats)
		ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}

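/* String set count seen by user space: the master's own count plus the
 * number of strings the switch driver exposes for the CPU port.
 */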
static int dsa_master_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int count = 0;

	if (sset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats)
		count = phy_ethtool_get_sset_count(dev->phydev);
	else if (ops->get_sset_count)
		count = ops->get_sset_count(dev, sset);

	if (count < 0)
		count = 0;

	if (ds->ops->get_sset_count)
		count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);

	return count;
}

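/* Export the master's own strings unchanged, then the CPU port strings
 * prefixed with "pNN_" (NN being the CPU port index) so they can be told
 * apart in the combined output.
 */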
static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
				   uint8_t *data)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	int port = cpu_dp->index;
	int len = ETH_GSTRING_LEN;
	int mcount = 0, count, i;
	uint8_t pfx[4];
	uint8_t *ndata;

	snprintf(pfx, sizeof(pfx), "p%.2d", port);
	/* We do not want to be NULL-terminated, since this is a prefix */
	pfx[sizeof(pfx) - 1] = '_';

	if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
	    !ops->get_ethtool_phy_stats) {
		mcount = phy_ethtool_get_sset_count(dev->phydev);
		if (mcount < 0)
			mcount = 0;
		else
			phy_ethtool_get_strings(dev->phydev, data);
	} else if (ops->get_sset_count && ops->get_strings) {
		mcount = ops->get_sset_count(dev, stringset);
		if (mcount < 0)
			mcount = 0;
		ops->get_strings(dev, stringset, data);
	}

	if (ds->ops->get_strings) {
		ndata = data + mcount * len;
		/* This function copies ETH_GSTRING_LEN bytes; we will mangle
		 * the output afterwards to prepend the CPU port prefix we
		 * constructed earlier.
		 */
		ds->ops->get_strings(ds, port, stringset, ndata);
		count = ds->ops->get_sset_count(ds, port, stringset);
		if (count < 0)
			return;
		for (i = 0; i < count; i++) {
			memmove(ndata + (i * len + sizeof(pfx)),
				ndata + i * len, len - sizeof(pfx));
			memcpy(ndata + i * len, pfx, sizeof(pfx));
		}
	}
}

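/* ioctl override installed on the master while it has a dsa_ptr: hardware
 * timestamping requests are refused with -EBUSY if any switch port in the
 * tree can handle them; everything else is passed through to the master's
 * own ndo_eth_ioctl().
 */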
static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct dsa_switch_tree *dst;
	int err = -EOPNOTSUPP;
	struct dsa_port *dp;

	dst = ds->dst;

	switch (cmd) {
	case SIOCGHWTSTAMP:
	case SIOCSHWTSTAMP:
		/* Deny PTP operations on master if there is at least one
		 * switch in the tree that is PTP capable.
		 */
		list_for_each_entry(dp, &dst->ports, list)
			if (dsa_port_supports_hwtstamp(dp, ifr))
				return -EBUSY;
		break;
	}

	if (dev->netdev_ops->ndo_eth_ioctl)
		err = dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd);

	return err;
}

static const struct dsa_netdevice_ops dsa_netdev_ops = {
	.ndo_eth_ioctl = dsa_master_ioctl,
};

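/* Replace the master's ethtool_ops with a copy whose register, statistics and
 * string callbacks also report the CPU port, keeping a pointer to the
 * original ops so they can be restored on teardown.
 */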
static int dsa_master_ethtool_setup(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch *ds = cpu_dp->ds;
	struct ethtool_ops *ops;

	if (netif_is_lag_master(dev))
		return 0;

	ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	cpu_dp->orig_ethtool_ops = dev->ethtool_ops;
	if (cpu_dp->orig_ethtool_ops)
		memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));

	ops->get_regs_len = dsa_master_get_regs_len;
	ops->get_regs = dsa_master_get_regs;
	ops->get_sset_count = dsa_master_get_sset_count;
	ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
	ops->get_strings = dsa_master_get_strings;
	ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;

	dev->ethtool_ops = ops;

	return 0;
}

static void dsa_master_ethtool_teardown(struct net_device *dev)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	if (netif_is_lag_master(dev))
		return;

	dev->ethtool_ops = cpu_dp->orig_ethtool_ops;
	cpu_dp->orig_ethtool_ops = NULL;
}

static void dsa_netdev_ops_set(struct net_device *dev,
			       const struct dsa_netdevice_ops *ops)
{
	if (netif_is_lag_master(dev))
		return;

	dev->dsa_ptr->netdev_ops = ops;
}

/* Keep the master always promiscuous if the tagging protocol requires it
 * (it garbles the MAC DA) or if the master doesn't support unicast filtering,
 * in which case it would revert to promiscuous mode as soon as we call
 * dev_uc_add() on it anyway.
 */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
	const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;

	if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
		return;

	ASSERT_RTNL();

	dev_set_promiscuity(dev, inc);
}

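/* "tagging" sysfs attribute of the master (under its "dsa" attribute group):
 * reading it shows the current tagging protocol, writing a tagger name
 * switches the whole switch tree to that protocol.
 */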
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;

	return sprintf(buf, "%s\n",
		       dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}

static ssize_t tagging_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	const struct dsa_device_ops *new_tag_ops, *old_tag_ops;
	struct net_device *dev = to_net_dev(d);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	int err;

	old_tag_ops = cpu_dp->tag_ops;
	new_tag_ops = dsa_find_tagger_by_name(buf);
	/* Bad tagger name, or module is not loaded? */
	if (IS_ERR(new_tag_ops))
		return PTR_ERR(new_tag_ops);

	if (new_tag_ops == old_tag_ops)
		/* Drop the temporarily held duplicate reference, since
		 * the DSA switch tree uses this tagger.
		 */
		goto out;

	err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops,
					old_tag_ops);
	if (err) {
		/* On failure the old tagger is restored, so we don't need the
		 * driver for the new one.
		 */
		dsa_tag_driver_put(new_tag_ops);
		return err;
	}

	/* On success we no longer need the module for the old tagging protocol
	 */
out:
	dsa_tag_driver_put(old_tag_ops);
	return count;
}

static DEVICE_ATTR_RW(tagging);

static struct attribute *dsa_slave_attrs[] = {
	&dev_attr_tagging.attr,
	NULL
};

static const struct attribute_group dsa_group = {
	.name	= "dsa",
	.attrs	= dsa_slave_attrs,
};

static void dsa_master_reset_mtu(struct net_device *dev)
{
	int err;

	err = dev_set_mtu(dev, ETH_DATA_LEN);
	if (err)
		netdev_dbg(dev,
			   "Unable to reset MTU to exclude DSA overheads\n");
}

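/* Set up @dev as the DSA master of @cpu_dp: create a device link to the
 * switch, grow the MTU to fit the tagging overhead, mark the device as
 * DSA-aware via dsa_ptr, and install the promiscuity, ethtool, ndo and sysfs
 * overrides above.
 */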
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
	const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
	struct dsa_switch *ds = cpu_dp->ds;
	struct device_link *consumer_link;
	int mtu, ret;

	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);

	/* The DSA master must use SET_NETDEV_DEV for this to work. */
	if (!netif_is_lag_master(dev)) {
		consumer_link = device_link_add(ds->dev, dev->dev.parent,
						DL_FLAG_AUTOREMOVE_CONSUMER);
		if (!consumer_link)
			netdev_err(dev,
				   "Failed to create a device link to DSA switch %s\n",
				   dev_name(ds->dev));
	}

	/* The switch driver may not implement ->port_change_mtu(), in which
	 * case dsa_slave_change_mtu() will not update the master MTU either,
	 * so we need to do that here.
	 */
	ret = dev_set_mtu(dev, mtu);
	if (ret)
		netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
			    ret, mtu);

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();

	dev->dsa_ptr = cpu_dp;

	dsa_master_set_promiscuity(dev, 1);

	ret = dsa_master_ethtool_setup(dev);
	if (ret)
		goto out_err_reset_promisc;

	dsa_netdev_ops_set(dev, &dsa_netdev_ops);

	ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
	if (ret)
		goto out_err_ndo_teardown;

	return ret;

out_err_ndo_teardown:
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
out_err_reset_promisc:
	dsa_master_set_promiscuity(dev, -1);
	return ret;
}

void dsa_master_teardown(struct net_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
	dsa_netdev_ops_set(dev, NULL);
	dsa_master_ethtool_teardown(dev);
	dsa_master_reset_mtu(dev);
	dsa_master_set_promiscuity(dev, -1);

	dev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();
}

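/* Prepare a LAG device for use as a DSA master: set it up as a master if it
 * isn't one already, then have the CPU port join the LAG; the master setup is
 * undone if joining fails.
 */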
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
			 struct netdev_lag_upper_info *uinfo,
			 struct netlink_ext_ack *extack)
{
	bool master_setup = false;
	int err;

	if (!netdev_uses_dsa(lag_dev)) {
		err = dsa_master_setup(lag_dev, cpu_dp);
		if (err)
			return err;

		master_setup = true;
	}

	err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "CPU port failed to join LAG");
		goto out_master_teardown;
	}

	return 0;

out_master_teardown:
	if (master_setup)
		dsa_master_teardown(lag_dev);
	return err;
}

/* Tear down a master if there isn't any other user port on it,
 * optionally also destroying LAG information.
 */
void dsa_master_lag_teardown(struct net_device *lag_dev,
			     struct dsa_port *cpu_dp)
{
	struct net_device *upper;
	struct list_head *iter;

	dsa_port_lag_leave(cpu_dp, lag_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
		if (dsa_slave_dev_check(upper))
			return;

	dsa_master_teardown(lag_dev);
}