br_switchdev.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

static struct static_key_false br_switchdev_tx_fwd_offload;
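/* TX forwarding offload can be used only when the egress port has opted in
 * (BR_TX_FWD_OFFLOAD) and the skb entered the bridge through a different
 * hardware domain, i.e. the egress switch has not seen this frame yet.
 */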
static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}
bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has been already forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}
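/* Record the hardware domain of the ingress port, so that forwarding to
 * other ports in the same domain can later be suppressed when the frame
 * carries an offload_fwd_mark.
 */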
void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}
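/* Egress through a port is allowed only if the frame has not already been
 * forwarded to that port's hardware domain, either by TX forwarding offload
 * (fwd_hwdoms) or in hardware on ingress (offload_fwd_mark set and the
 * source domain matching the egress domain).
 */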
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
	       (!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | \
				  BR_PORT_LOCKED | BR_HAIRPIN_MODE | \
				  BR_ISOLATED | BR_MULTICAST_TO_UNICAST)
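/* Offload bridge port flags in two phases: first ask the driver from atomic
 * context whether the change is supported at all (PRE_BRIDGE_FLAGS), then
 * apply it through a deferred SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS attribute.
 */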
int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "error setting offload flag on port");
		return err;
	}

	return 0;
}
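/* Fill in a switchdev FDB notifier item from a bridge FDB entry. Entries
 * without a destination port, as well as local entries, are reported
 * against the bridge device itself.
 */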
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
	 * Drivers don't know how to deal with these, so don't notify them to
	 * avoid confusing them.
	 */
	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		return;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}
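/* Assign a hardware domain to the joining port: reuse the domain of an
 * existing port behind the same parent switch ID, otherwise allocate a
 * fresh one. Domain 0 is never handed out and means "not offloaded".
 */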
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}
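/* Release the hardware domain once the last port using it has left. */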
static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}
static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface, and the member ports are from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;
		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}
static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}
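/* Replay the software FDB towards the given notifier, so that a driver
 * starting (or stopping) to offload a port sees the same database as the
 * bridge does.
 */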
static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}
static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}
static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}
static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};
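/* Completion handler for deferred MDB additions: once the driver has
 * accepted the entry, mark the matching port group as offloaded.
 */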
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}
static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}
static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}
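/* Notify drivers of an MDB change. Entries without a port group are
 * host-joined groups, which are replicated to all lower devices of the
 * bridge instead of a single port.
 */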
void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif
static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}
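/* Sync the software bridge state (VLANs, MDB, FDB) to a port that starts
 * being offloaded. -EOPNOTSUPP from the driver is tolerated, since it may
 * support only a subset of the replayed object types.
 */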
static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}
/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}