br_if.c
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Userspace interface
  4. * Linux ethernet bridge
  5. *
  6. * Authors:
  7. * Lennert Buytenhek <[email protected]>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/netdevice.h>
  11. #include <linux/etherdevice.h>
  12. #include <linux/netpoll.h>
  13. #include <linux/ethtool.h>
  14. #include <linux/if_arp.h>
  15. #include <linux/module.h>
  16. #include <linux/init.h>
  17. #include <linux/rtnetlink.h>
  18. #include <linux/if_ether.h>
  19. #include <linux/slab.h>
  20. #include <net/dsa.h>
  21. #include <net/sock.h>
  22. #include <linux/if_vlan.h>
  23. #include <net/switchdev.h>
  24. #include <net/net_namespace.h>
  25. #include "br_private.h"
  26. /*
  27. * Determine initial path cost based on speed.
  28. * using recommendations from 802.1d standard
  29. *
  30. * Since driver might sleep need to not be holding any locks.
  31. */
  32. static int port_cost(struct net_device *dev)
  33. {
  34. struct ethtool_link_ksettings ecmd;
  35. if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
  36. switch (ecmd.base.speed) {
  37. case SPEED_10000:
  38. return 2;
  39. case SPEED_5000:
  40. return 3;
  41. case SPEED_2500:
  42. return 4;
  43. case SPEED_1000:
  44. return 5;
  45. case SPEED_100:
  46. return 19;
  47. case SPEED_10:
  48. return 100;
  49. case SPEED_UNKNOWN:
  50. return 100;
  51. default:
  52. if (ecmd.base.speed > SPEED_10000)
  53. return 1;
  54. }
  55. }
  56. /* Old silly heuristics based on name */
  57. if (!strncmp(dev->name, "lec", 3))
  58. return 7;
  59. if (!strncmp(dev->name, "plip", 4))
  60. return 2500;
  61. return 100; /* assume old 10Mbps */
  62. }
/* Check for port carrier transitions.
 * Enables or disables the port's STP machinery to follow the carrier
 * state; *notified is set true iff a state change was made here.
 */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	/* Refresh the automatic path cost from the current link speed,
	 * unless the administrator pinned a cost (BR_ADMIN_COST).  Done
	 * before taking br->lock because port_cost() may sleep.
	 */
	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		/* carrier up: bring a disabled port back into STP */
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		/* carrier down: disable the port if it was active */
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}
  88. static void br_port_set_promisc(struct net_bridge_port *p)
  89. {
  90. int err = 0;
  91. if (br_promisc_port(p))
  92. return;
  93. err = dev_set_promiscuity(p->dev, 1);
  94. if (err)
  95. return;
  96. br_fdb_unsync_static(p->br, p);
  97. p->flags |= BR_PROMISC;
  98. }
  99. static void br_port_clear_promisc(struct net_bridge_port *p)
  100. {
  101. int err;
  102. /* Check if the port is already non-promisc or if it doesn't
  103. * support UNICAST filtering. Without unicast filtering support
  104. * we'll end up re-enabling promisc mode anyway, so just check for
  105. * it here.
  106. */
  107. if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
  108. return;
  109. /* Since we'll be clearing the promisc mode, program the port
  110. * first so that we don't have interruption in traffic.
  111. */
  112. err = br_fdb_sync_static(p->br, p);
  113. if (err)
  114. return;
  115. dev_set_promiscuity(p->dev, -1);
  116. p->flags &= ~BR_PROMISC;
  117. }
  118. /* When a port is added or removed or when certain port flags
  119. * change, this function is called to automatically manage
  120. * promiscuity setting of all the bridge ports. We are always called
  121. * under RTNL so can skip using rcu primitives.
  122. */
  123. void br_manage_promisc(struct net_bridge *br)
  124. {
  125. struct net_bridge_port *p;
  126. bool set_all = false;
  127. /* If vlan filtering is disabled or bridge interface is placed
  128. * into promiscuous mode, place all ports in promiscuous mode.
  129. */
  130. if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
  131. set_all = true;
  132. list_for_each_entry(p, &br->port_list, list) {
  133. if (set_all) {
  134. br_port_set_promisc(p);
  135. } else {
  136. /* If the number of auto-ports is <= 1, then all other
  137. * ports will have their output configuration
  138. * statically specified through fdbs. Since ingress
  139. * on the auto-port becomes forwarding/egress to other
  140. * ports and egress configuration is statically known,
  141. * we can say that ingress configuration of the
  142. * auto-port is also statically known.
  143. * This lets us disable promiscuous mode and write
  144. * this config to hw.
  145. */
  146. if ((p->dev->priv_flags & IFF_UNICAST_FLT) &&
  147. (br->auto_cnt == 0 ||
  148. (br->auto_cnt == 1 && br_auto_port(p))))
  149. br_port_clear_promisc(p);
  150. else
  151. br_port_set_promisc(p);
  152. }
  153. }
  154. }
  155. int nbp_backup_change(struct net_bridge_port *p,
  156. struct net_device *backup_dev)
  157. {
  158. struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
  159. struct net_bridge_port *backup_p = NULL;
  160. ASSERT_RTNL();
  161. if (backup_dev) {
  162. if (!netif_is_bridge_port(backup_dev))
  163. return -ENOENT;
  164. backup_p = br_port_get_rtnl(backup_dev);
  165. if (backup_p->br != p->br)
  166. return -EINVAL;
  167. }
  168. if (p == backup_p)
  169. return -EINVAL;
  170. if (old_backup == backup_p)
  171. return 0;
  172. /* if the backup link is already set, clear it */
  173. if (old_backup)
  174. old_backup->backup_redirected_cnt--;
  175. if (backup_p)
  176. backup_p->backup_redirected_cnt++;
  177. rcu_assign_pointer(p->backup_port, backup_p);
  178. return 0;
  179. }
  180. static void nbp_backup_clear(struct net_bridge_port *p)
  181. {
  182. nbp_backup_change(p, NULL);
  183. if (p->backup_redirected_cnt) {
  184. struct net_bridge_port *cur_p;
  185. list_for_each_entry(cur_p, &p->br->port_list, list) {
  186. struct net_bridge_port *backup_p;
  187. backup_p = rtnl_dereference(cur_p->backup_port);
  188. if (backup_p == p)
  189. nbp_backup_change(cur_p, NULL);
  190. }
  191. }
  192. WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
  193. }
  194. static void nbp_update_port_count(struct net_bridge *br)
  195. {
  196. struct net_bridge_port *p;
  197. u32 cnt = 0;
  198. list_for_each_entry(p, &br->port_list, list) {
  199. if (br_auto_port(p))
  200. cnt++;
  201. }
  202. if (br->auto_cnt != cnt) {
  203. br->auto_cnt = cnt;
  204. br_manage_promisc(br);
  205. }
  206. }
  207. static void nbp_delete_promisc(struct net_bridge_port *p)
  208. {
  209. /* If port is currently promiscuous, unset promiscuity.
  210. * Otherwise, it is a static port so remove all addresses
  211. * from it.
  212. */
  213. dev_set_allmulti(p->dev, -1);
  214. if (br_promisc_port(p))
  215. dev_set_promiscuity(p->dev, -1);
  216. else
  217. br_fdb_unsync_static(p->br, p);
  218. }
  219. static void release_nbp(struct kobject *kobj)
  220. {
  221. struct net_bridge_port *p
  222. = container_of(kobj, struct net_bridge_port, kobj);
  223. kfree(p);
  224. }
  225. static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
  226. {
  227. struct net_bridge_port *p = kobj_to_brport(kobj);
  228. net_ns_get_ownership(dev_net(p->dev), uid, gid);
  229. }
/* kobject type backing each bridge port's sysfs directory. */
static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
	.get_ownership = brport_get_ownership,
};
  237. static void destroy_nbp(struct net_bridge_port *p)
  238. {
  239. struct net_device *dev = p->dev;
  240. p->br = NULL;
  241. p->dev = NULL;
  242. netdev_put(dev, &p->dev_tracker);
  243. kobject_put(&p->kobj);
  244. }
  245. static void destroy_nbp_rcu(struct rcu_head *head)
  246. {
  247. struct net_bridge_port *p =
  248. container_of(head, struct net_bridge_port, rcu);
  249. destroy_nbp(p);
  250. }
  251. static unsigned get_max_headroom(struct net_bridge *br)
  252. {
  253. unsigned max_headroom = 0;
  254. struct net_bridge_port *p;
  255. list_for_each_entry(p, &br->port_list, list) {
  256. unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);
  257. if (dev_headroom > max_headroom)
  258. max_headroom = dev_headroom;
  259. }
  260. return max_headroom;
  261. }
  262. static void update_headroom(struct net_bridge *br, int new_hr)
  263. {
  264. struct net_bridge_port *p;
  265. list_for_each_entry(p, &br->port_list, list)
  266. netdev_set_rx_headroom(p->dev, new_hr);
  267. br->dev->needed_headroom = new_hr;
  268. }
/* Delete port(interface) from bridge is done in two steps.
 * via RCU. First step, marks device as down. That deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPU's finished
 * processing packets.
 *
 * Protected from multiple admin operations by RTNL mutex
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	/* Drop the promiscuity/allmulti references taken at add time. */
	nbp_delete_promisc(p);

	/* Stop STP on the port under the bridge lock. */
	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_mrp_port_del(br, p);
	br_cfm_port_del(br, p);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	/* Unlink from the port list; readers may still see it until the
	 * RCU grace period below completes.
	 */
	list_del_rcu(&p->list);

	/* If this port was the one driving the bridge headroom,
	 * recompute it from the remaining ports.
	 */
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();
	nbp_backup_clear(p);
	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	/* Free only after all CPUs are done with in-flight packets. */
	call_rcu(&p->rcu, destroy_nbp_rcu);
}
  308. /* Delete bridge device */
  309. void br_dev_delete(struct net_device *dev, struct list_head *head)
  310. {
  311. struct net_bridge *br = netdev_priv(dev);
  312. struct net_bridge_port *p, *n;
  313. list_for_each_entry_safe(p, n, &br->port_list, list) {
  314. del_nbp(p);
  315. }
  316. br_recalculate_neigh_suppress_enabled(br);
  317. br_fdb_delete_by_port(br, NULL, 0, 1);
  318. cancel_delayed_work_sync(&br->gc_work);
  319. br_sysfs_delbr(br->dev);
  320. unregister_netdevice_queue(br->dev, head);
  321. }
  322. /* find an available port number */
  323. static int find_portno(struct net_bridge *br)
  324. {
  325. int index;
  326. struct net_bridge_port *p;
  327. unsigned long *inuse;
  328. inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
  329. if (!inuse)
  330. return -ENOMEM;
  331. __set_bit(0, inuse); /* zero is reserved */
  332. list_for_each_entry(p, &br->port_list, list)
  333. __set_bit(p->port_no, inuse);
  334. index = find_first_zero_bit(inuse, BR_MAX_PORTS);
  335. bitmap_free(inuse);
  336. return (index >= BR_MAX_PORTS) ? -EXFULL : index;
  337. }
  338. /* called with RTNL but without bridge lock */
  339. static struct net_bridge_port *new_nbp(struct net_bridge *br,
  340. struct net_device *dev)
  341. {
  342. struct net_bridge_port *p;
  343. int index, err;
  344. index = find_portno(br);
  345. if (index < 0)
  346. return ERR_PTR(index);
  347. p = kzalloc(sizeof(*p), GFP_KERNEL);
  348. if (p == NULL)
  349. return ERR_PTR(-ENOMEM);
  350. p->br = br;
  351. netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
  352. p->dev = dev;
  353. p->path_cost = port_cost(dev);
  354. p->priority = 0x8000 >> BR_PORT_BITS;
  355. p->port_no = index;
  356. p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
  357. br_init_port(p);
  358. br_set_state(p, BR_STATE_DISABLED);
  359. br_stp_port_timer_init(p);
  360. err = br_multicast_add_port(p);
  361. if (err) {
  362. netdev_put(dev, &p->dev_tracker);
  363. kfree(p);
  364. p = ERR_PTR(err);
  365. }
  366. return p;
  367. }
  368. int br_add_bridge(struct net *net, const char *name)
  369. {
  370. struct net_device *dev;
  371. int res;
  372. dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
  373. br_dev_setup);
  374. if (!dev)
  375. return -ENOMEM;
  376. dev_net_set(dev, net);
  377. dev->rtnl_link_ops = &br_link_ops;
  378. res = register_netdevice(dev);
  379. if (res)
  380. free_netdev(dev);
  381. return res;
  382. }
  383. int br_del_bridge(struct net *net, const char *name)
  384. {
  385. struct net_device *dev;
  386. int ret = 0;
  387. dev = __dev_get_by_name(net, name);
  388. if (dev == NULL)
  389. ret = -ENXIO; /* Could not find device */
  390. else if (!netif_is_bridge_master(dev)) {
  391. /* Attempt to delete non bridge device! */
  392. ret = -EPERM;
  393. }
  394. else if (dev->flags & IFF_UP) {
  395. /* Not shutdown yet. */
  396. ret = -EBUSY;
  397. }
  398. else
  399. br_dev_delete(dev, NULL);
  400. return ret;
  401. }
  402. /* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
  403. static int br_mtu_min(const struct net_bridge *br)
  404. {
  405. const struct net_bridge_port *p;
  406. int ret_mtu = 0;
  407. list_for_each_entry(p, &br->port_list, list)
  408. if (!ret_mtu || ret_mtu > p->dev->mtu)
  409. ret_mtu = p->dev->mtu;
  410. return ret_mtu ? ret_mtu : ETH_DATA_LEN;
  411. }
  412. void br_mtu_auto_adjust(struct net_bridge *br)
  413. {
  414. ASSERT_RTNL();
  415. /* if the bridge MTU was manually configured don't mess with it */
  416. if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
  417. return;
  418. /* change to the minimum MTU and clear the flag which was set by
  419. * the bridge ndo_change_mtu callback
  420. */
  421. dev_set_mtu(br->dev, br_mtu_min(br));
  422. br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
  423. }
  424. static void br_set_gso_limits(struct net_bridge *br)
  425. {
  426. unsigned int tso_max_size = TSO_MAX_SIZE;
  427. const struct net_bridge_port *p;
  428. u16 tso_max_segs = TSO_MAX_SEGS;
  429. list_for_each_entry(p, &br->port_list, list) {
  430. tso_max_size = min(tso_max_size, p->dev->tso_max_size);
  431. tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs);
  432. }
  433. netif_set_tso_max_size(br->dev, tso_max_size);
  434. netif_set_tso_max_segs(br->dev, tso_max_segs);
  435. }
  436. /*
  437. * Recomputes features using slave's features
  438. */
  439. netdev_features_t br_features_recompute(struct net_bridge *br,
  440. netdev_features_t features)
  441. {
  442. struct net_bridge_port *p;
  443. netdev_features_t mask;
  444. if (list_empty(&br->port_list))
  445. return features;
  446. mask = features;
  447. features &= ~NETIF_F_ONE_FOR_ALL;
  448. list_for_each_entry(p, &br->port_list, list) {
  449. features = netdev_increment_features(features,
  450. p->dev->features, mask);
  451. }
  452. features = netdev_add_tso_features(features, mask);
  453. return features;
  454. }
/* called with RTNL.
 * Enslave @dev to bridge @br.  On failure the error-unwind ladder
 * (err1..err6) reverses the setup steps in exactly the opposite order.
 */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr, fdb_synced = false;

	/* Don't allow bridging non-ethernet like devices. */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err) {
		br_multicast_del_port(p);
		netdev_put(dev, &p->dev_tracker);
		kfree(p);	/* kobject not yet init'd, manually free */
		goto err1;
	}

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err2;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_get_rx_handler(dev), p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);
	if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
		/* When updating the port count we also update all ports'
		 * promiscuous mode.
		 * A port leaving promiscuous mode normally gets the bridge's
		 * fdb synced to the unicast filter (if supported), however,
		 * `br_port_clear_promisc` does not distinguish between
		 * non-promiscuous ports and *new* ports, so we need to
		 * sync explicitly here.
		 */
		fdb_synced = br_fdb_sync_static(br, p) == 0;
		if (!fdb_synced)
			netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
	}

	netdev_update_features(br->dev);

	/* Grow the bridge headroom if this port needs more; otherwise
	 * tell the port about the bridge's current headroom.
	 */
	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_add_local(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	if (br->dev->addr_assign_type != NET_ADDR_SET) {
		/* Ask for permission to use this MAC address now, even if we
		 * don't end up choosing it below.
		 */
		err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
		if (err)
			goto err6;
	}

	err = nbp_vlan_init(p, extack);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err6;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err6:
	if (fdb_synced)
		br_fdb_unsync_static(br, p);
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	br_multicast_del_port(p);
	netdev_put(dev, &p->dev_tracker);
	kobject_put(&p->kobj);
	dev_set_allmulti(dev, -1);
err1:
	return err;
}
  584. /* called with RTNL */
  585. int br_del_if(struct net_bridge *br, struct net_device *dev)
  586. {
  587. struct net_bridge_port *p;
  588. bool changed_addr;
  589. p = br_port_get_rtnl(dev);
  590. if (!p || p->br != br)
  591. return -EINVAL;
  592. /* Since more than one interface can be attached to a bridge,
  593. * there still maybe an alternate path for netconsole to use;
  594. * therefore there is no reason for a NETDEV_RELEASE event.
  595. */
  596. del_nbp(p);
  597. br_mtu_auto_adjust(br);
  598. br_set_gso_limits(br);
  599. spin_lock_bh(&br->lock);
  600. changed_addr = br_stp_recalculate_bridge_id(br);
  601. spin_unlock_bh(&br->lock);
  602. if (changed_addr)
  603. call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
  604. netdev_update_features(br->dev);
  605. return 0;
  606. }
  607. void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
  608. {
  609. struct net_bridge *br = p->br;
  610. if (mask & BR_AUTO_MASK)
  611. nbp_update_port_count(br);
  612. if (mask & BR_NEIGH_SUPPRESS)
  613. br_recalculate_neigh_suppress_enabled(br);
  614. }
  615. bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
  616. {
  617. struct net_bridge_port *p;
  618. p = br_port_get_rtnl_rcu(dev);
  619. if (!p)
  620. return false;
  621. return p->flags & flag;
  622. }
  623. EXPORT_SYMBOL_GPL(br_port_flag_is_set);