// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <[email protected]>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

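/* All registered taggers are kept on this list; the mutex serializes
 * registration and unregistration from tagging driver module init/exit
 * paths against lookups.
 */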
static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.name	= "none",
	.proto	= DSA_TAG_PROTO_NONE,
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

DSA_TAG_DRIVER(none_ops);

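/* A tagging driver built as a module would typically hook into the
 * registration helpers below via convenience macros. A minimal sketch,
 * assuming the module_dsa_tag_driver() and MODULE_ALIAS_DSA_TAG_DRIVER()
 * macros from include/net/dsa.h; the "foo" tagger and its callbacks are
 * hypothetical, for illustration only:
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name	= "foo",
 *		.proto	= DSA_TAG_PROTO_FOO,
 *		.xmit	= foo_tag_xmit,
 *		.rcv	= foo_tag_rcv,
 *	};
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *	module_dsa_tag_driver(foo_netdev_ops);
 *
 * The alias is what lets dsa_tag_driver_get() below autoload the module
 * by protocol number.
 */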
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}

/* Function takes a reference on the module owning the tagger,
 * so dsa_tag_driver_put must be called afterwards.
 */
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf)
{
	const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		const struct dsa_device_ops *tmp = dsa_tag_driver->ops;

		if (!sysfs_streq(buf, tmp->name))
			continue;

		if (!try_module_get(dsa_tag_driver->owner))
			break;

		ops = tmp;
		break;
	}
	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

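/* Look up a tagger by its DSA_TAG_PROTO_* value, first attempting to load
 * the backing module through its alias. On success, a reference is taken
 * on the owning module; release it with dsa_tag_driver_put(). Returns
 * ERR_PTR(-ENOPROTOOPT) if no matching tagger can be pinned.
 */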
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}
	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

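/* Drop the module reference taken by dsa_tag_driver_get() or
 * dsa_find_tagger_by_name().
 */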
void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

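/* Match a device, or one of its children, against a device class name.
 * dev_find_class() returns the matching device with a reference held.
 */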
static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

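/* Resolve a struct device to the net_device registered under its "net"
 * class child. A reference is taken on the returned net_device; the
 * caller is responsible for dropping it with dev_put().
 */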
struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

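/* Receive handler for the ETH_P_XDSA packet type. The CPU port's tagger
 * decodes and strips the switch tag, after which the skb is delivered to
 * the user port's net_device, or passed straight up in the case of an
 * upper (non-DSA) interface.
 */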
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
		/* Packet is to be injected directly on an upper
		 * device, e.g. a team/bond, so skip all DSA-port
		 * specific actions.
		 */
		netif_rx(skb);

		return 0;
	}

	p = netdev_priv(skb->dev);

	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	dev_sw_netstats_rx_add(skb->dev, skb->len);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

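/* System sleep support. On suspend, user ports are quiesced before the
 * switch driver's own suspend hook runs; resume does the reverse.
 */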
#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif /* CONFIG_PM_SLEEP */

static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

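/* An ordered workqueue: deferred DSA work items execute one at a time,
 * in submission order.
 */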
static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);

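/* Thin wrappers exposing the devlink API to switch drivers, resolving a
 * dsa_switch (or one of its ports) to the corresponding devlink instance.
 */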
int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);

int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count)
{
	return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count)
{
	devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params)
{
	return devlink_resource_register(ds->devlink, resource_name,
					 resource_size, resource_id,
					 parent_resource_id,
					 size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
	devlink_resources_unregister(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv)
{
	return devlink_resource_occ_get_register(ds->devlink, resource_id,
						 occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id)
{
	devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);

struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size)
{
	return devlink_region_create(ds->devlink, ops, region_max_snapshots,
				     region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);

struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	return devlink_port_region_create(&dp->devlink_port, ops,
					  region_max_snapshots,
					  region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);

void dsa_devlink_region_destroy(struct devlink_region *region)
{
	devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

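/* Compare two address databases for equality, keyed first by type and then
 * by the type-specific identifier (port, LAG device or bridge number).
 */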
bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

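/* Report whether the same {addr, vid} pair is also installed on this port
 * on behalf of a database of the same type other than @db. Callers must
 * hold the port's addr_lists_lock, as asserted below.
 */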
bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	return 0;

netlink_register_fail:
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	rtnl_link_unregister(&dsa_link_ops);

	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <[email protected]>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");