// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <[email protected]>
 * Copyright (c) 2014-2015 Scott Feldman <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 * switchdev_deferred_process - Process ops in deferred queue
 *
 * Called to flush the ops currently queued in the deferred ops queue.
 * rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
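/*
 * A minimal caller-side sketch (not part of this file; the surrounding
 * code and the attr variable are hypothetical): a caller that has just
 * queued a deferred op and holds rtnl_lock may flush it synchronously
 * instead of waiting for the deferred work item to run.
 *
 *	rtnl_lock();
 *	err = switchdev_port_attr_set(dev, &attr, NULL); // attr.flags has SWITCHDEV_F_DEFER
 *	switchdev_deferred_process();	// run the queued op now, under rtnl
 *	rtnl_unlock();
 */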
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 * switchdev_port_attr_set - Set port attribute
 *
 * @dev: port device
 * @attr: attribute to set
 * @extack: netlink extended ack, for error message propagation
 *
 * If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held and
 * the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
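/*
 * A minimal caller-side sketch (not part of this file): programming a
 * bridge port's STP state, roughly as the bridge layer does it. The
 * attribute id, flag and union member are the real switchdev ones; the
 * surrounding variables are hypothetical.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,	// callers may be atomic
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *	err = switchdev_port_attr_set(brport_dev, &attr, extack);
 */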
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}

	if (!obj_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 * switchdev_port_obj_add - Add port object
 *
 * @dev: port device
 * @obj: object to add
 * @extack: netlink extended ack
 *
 * If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held and
 * the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
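/*
 * A minimal caller-side sketch (hypothetical surroundings, assuming a
 * bridge port device @brport_dev): adding VLAN 10 as PVID/untagged to a
 * port. The object id, struct layout and bridge VLAN flags are the real
 * kernel ones.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 10,
 *	};
 *	err = switchdev_port_obj_add(brport_dev, &vlan.obj, extack);
 */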
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 * switchdev_port_obj_del - Delete port object
 *
 * @dev: port device
 * @obj: object to delete
 *
 * If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held and
 * the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
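/*
 * Deletion mirrors the addition sketch above (same hypothetical @vlan
 * object), minus the extack: note that the deletion path carries no
 * netlink extended ack.
 *
 *	err = switchdev_port_obj_del(brport_dev, &vlan.obj);
 */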
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 * register_switchdev_notifier - Register notifier
 * @nb: notifier_block
 *
 * Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 * unregister_switchdev_notifier - Unregister notifier
 * @nb: notifier_block
 *
 * Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 * call_switchdev_notifiers - Call notifiers
 * @val: value passed unmodified to notifier function
 * @dev: port device
 * @info: notifier information data
 * @extack: netlink extended ack
 *
 * Call all switchdev notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
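/*
 * A minimal driver-side sketch (all "my_" names hypothetical): register
 * an atomic notifier and react to FDB entries that the bridge asks the
 * hardware to offload. The event values and the helper
 * switchdev_notifier_info_to_dev() are the real ones; since this chain
 * is atomic, real drivers defer the actual hardware access to a work
 * item.
 *
 *	static int my_switchdev_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			// queue deferred work that programs @dev's FDB
 *			return NOTIFY_DONE;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_switchdev_nb = {
 *		.notifier_call = my_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&my_switchdev_nb);
 */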
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
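/*
 * A minimal sketch of how a driver's deferred FDB work might use this
 * helper (all "my_" callbacks hypothetical): @check_cb recognizes the
 * driver's own netdevs, @foreign_dev_check_cb says whether a switchdev
 * neighbor belongs to a different switch, and @mod_cb programs the
 * entry into hardware.
 *
 *	err = switchdev_handle_fdb_event_to_device(dev, event, fdb_info,
 *						   my_dev_check,
 *						   my_foreign_dev_check,
 *						   my_fdb_mod_cb);
 */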
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
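/*
 * A minimal sketch of driver usage (the "my_" names are hypothetical),
 * from inside the driver's blocking notifier handler, where @ptr is the
 * notifier payload (a struct switchdev_notifier_port_obj_info *):
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		err = switchdev_handle_port_obj_add(dev, ptr, my_dev_check,
 *						    my_port_obj_add);
 *		return notifier_from_errno(err);
 */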
/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
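/*
 * A minimal sketch (hypothetical "my_" names) of a driver's blocking
 * notifier dispatching attribute sets through the helper, alongside the
 * object add case shown earlier:
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr, my_dev_check,
 *						     my_port_attr_set);
 *		return notifier_from_errno(err);
 */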
int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
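/*
 * A minimal driver-side sketch (names hypothetical): when a port joins
 * a bridge (e.g. on NETDEV_CHANGEUPPER with a bridge master), announce
 * that the bridge port is offloaded; undo it when the port leaves.
 *
 *	err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *					    &my_switchdev_nb,
 *					    &my_switchdev_blocking_nb,
 *					    false, extack);
 *	...
 *	switchdev_bridge_port_unoffload(brport_dev, priv,
 *					&my_switchdev_nb,
 *					&my_switchdev_blocking_nb);
 */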