  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Forwarding database
  4. * Linux ethernet bridge
  5. *
  6. * Authors:
  7. * Lennert Buytenhek <[email protected]>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/init.h>
  11. #include <linux/rculist.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/times.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/jhash.h>
  17. #include <linux/random.h>
  18. #include <linux/slab.h>
  19. #include <linux/atomic.h>
  20. #include <asm/unaligned.h>
  21. #include <linux/if_vlan.h>
  22. #include <net/switchdev.h>
  23. #include <trace/events/bridge.h>
  24. #include "br_private.h"
/* rhashtable configuration for the FDB: entries are hashed by the whole
 * struct net_bridge_fdb_key, i.e. (MAC address, VLAN id).
 */
static const struct rhashtable_params br_fdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
	.key_len = sizeof(struct net_bridge_fdb_key),
	.automatic_shrinking = true,
};

/* slab cache for FDB entries, shared by all bridges in the system */
static struct kmem_cache *br_fdb_cache __read_mostly;
/* Create the global FDB entry slab cache at module init.
 * Returns 0 on success, -ENOMEM if the cache cannot be created.
 */
int __init br_fdb_init(void)
{
	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
					 sizeof(struct net_bridge_fdb_entry),
					 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	if (!br_fdb_cache)
		return -ENOMEM;

	return 0;
}
/* Destroy the global FDB slab cache on module unload. */
void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}
/* Initialize the per-bridge FDB hash table. Returns 0 or a negative errno. */
int br_fdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}
/* Tear down the per-bridge FDB hash table. */
void br_fdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->fdb_hash_tbl);
}
  54. /* if topology_changing then use forward_delay (default 15 sec)
  55. * otherwise keep longer (default 5 minutes)
  56. */
  57. static inline unsigned long hold_time(const struct net_bridge *br)
  58. {
  59. return br->topology_change ? br->forward_delay : br->ageing_time;
  60. }
  61. static inline int has_expired(const struct net_bridge *br,
  62. const struct net_bridge_fdb_entry *fdb)
  63. {
  64. return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
  65. !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
  66. time_before_eq(fdb->updated + hold_time(br), jiffies);
  67. }
  68. static void fdb_rcu_free(struct rcu_head *head)
  69. {
  70. struct net_bridge_fdb_entry *ent
  71. = container_of(head, struct net_bridge_fdb_entry, rcu);
  72. kmem_cache_free(br_fdb_cache, ent);
  73. }
  74. static int fdb_to_nud(const struct net_bridge *br,
  75. const struct net_bridge_fdb_entry *fdb)
  76. {
  77. if (test_bit(BR_FDB_LOCAL, &fdb->flags))
  78. return NUD_PERMANENT;
  79. else if (test_bit(BR_FDB_STATIC, &fdb->flags))
  80. return NUD_NOARP;
  81. else if (has_expired(br, fdb))
  82. return NUD_STALE;
  83. else
  84. return NUD_REACHABLE;
  85. }
/* Build one RTM_NEWNEIGH/RTM_DELNEIGH netlink message describing @fdb
 * into @skb. Returns 0 on success or -EMSGSIZE when the skb runs out of
 * tailroom (the partially built message is cancelled).
 */
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = 0;
	ndm->ndm_type = 0;
	/* a NULL dst means the address belongs to the bridge device itself */
	ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
	ndm->ndm_state = fdb_to_nud(br, fdb);

	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		ndm->ndm_flags |= NTF_OFFLOADED;
	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		ndm->ndm_flags |= NTF_EXT_LEARNED;
	if (test_bit(BR_FDB_STICKY, &fdb->flags))
		ndm->ndm_flags |= NTF_STICKY;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;

	ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt = 0;
	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* vlan 0 means "no vlan" and is not reported */
	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
					&fdb->key.vlan_id))
		goto nla_put_failure;

	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
		u8 notify_bits = FDB_NOTIFY_BIT;

		if (!nest)
			goto nla_put_failure;
		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;

		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}

		nla_nest_end(skb, nest);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Worst-case size of the netlink message built by fdb_fill_info();
 * used to size the skb in fdb_notify().
 */
static inline size_t fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo))
		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
}
/* Notify listeners about an FDB change: optionally forward the event to
 * switchdev drivers, then broadcast an RTNLGRP_NEIGH netlink message.
 * Runs in atomic context (GFP_ATOMIC allocations).
 */
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *fdb, int type,
		       bool swdev_notify)
{
	struct net *net = dev_net(br->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (swdev_notify)
		br_switchdev_fdb_notify(br, fdb, type);

	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/* Look up an FDB entry by (MAC, vlan). Caller must hold rcu_read_lock();
 * the returned entry is only valid within the RCU critical section.
 */
static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
						 const unsigned char *addr,
						 __u16 vid)
{
	struct net_bridge_fdb_key key;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key.vlan_id = vid;
	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));

	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
}
/* requires bridge hash_lock */
/* Look up an FDB entry while holding br->hash_lock. The lock, not RCU,
 * keeps the returned entry alive after rcu_read_unlock().
 */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
						const unsigned char *addr,
						__u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	lockdep_assert_held_once(&br->hash_lock);

	rcu_read_lock();
	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	rcu_read_unlock();

	return fdb;
}
/* Return the port device behind (addr, vid) on bridge @br_dev, or NULL
 * if @br_dev is not a bridge master or no matching entry with a
 * destination port exists. Called under RTNL.
 */
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
				    const unsigned char *addr,
				    __u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_device *dev = NULL;
	struct net_bridge *br;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev))
		return NULL;

	br = netdev_priv(br_dev);
	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (f && f->dst)
		dev = f->dst->dev;
	rcu_read_unlock();

	return dev;
}
EXPORT_SYMBOL_GPL(br_fdb_find_port);
/* RCU lookup wrapper; caller must hold rcu_read_lock(). */
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
					     const unsigned char *addr,
					     __u16 vid)
{
	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
/* When a static FDB entry is added, the mac address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	int err;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list) {
		/* promiscuous ports receive everything anyway */
		if (!br_promisc_port(p)) {
			err = dev_uc_add(p->dev, addr);
			if (err)
				goto undo;
		}
	}

	return;
undo:
	/* roll back the address on every port updated so far */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}
/* When a static FDB entry is deleted, the HW address from that entry is
 * also removed from the bridge private HW address list and updates all
 * the ports with needed information.
 * Called under RTNL.
 */
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	struct net_bridge_port *p;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}
/* Unlink @f from the FDB, send notifications, and free it via RCU.
 * Caller must hold br->hash_lock. @swdev_notify controls whether the
 * deletion is also propagated to switchdev drivers.
 */
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
		       bool swdev_notify)
{
	trace_fdb_delete(br, f);

	/* static entries had their address pushed to the ports' uc lists */
	if (test_bit(BR_FDB_STATIC, &f->flags))
		fdb_del_hw_addr(br, f->key.addr.addr);

	hlist_del_init_rcu(&f->fdb_node);
	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
			       br_fdb_rht_params);
	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
	/* readers may still hold a reference; free after a grace period */
	call_rcu(&f->rcu, fdb_rcu_free);
}
/* Delete a local entry if no other port had the same address. */
/* If another port (or the bridge device itself) still owns the same
 * MAC/VLAN, re-point the entry there instead of deleting it.
 * Caller must hold br->hash_lock.
 */
static void fdb_delete_local(struct net_bridge *br,
			     const struct net_bridge_port *p,
			     struct net_bridge_fdb_entry *f)
{
	const unsigned char *addr = f->key.addr.addr;
	struct net_bridge_vlan_group *vg;
	const struct net_bridge_vlan *v;
	struct net_bridge_port *op;
	u16 vid = f->key.vlan_id;

	/* Maybe another port has same hw addr? */
	list_for_each_entry(op, &br->port_list, list) {
		vg = nbp_vlan_group(op);
		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
		    (!vid || br_vlan_find(vg, vid))) {
			f->dst = op;
			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
			return;
		}
	}

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* Maybe bridge device has same hw addr? */
	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
	    (!vid || (v && br_vlan_should_use(v)))) {
		f->dst = NULL;
		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
		return;
	}

	fdb_delete(br, f, true);
}
/* Delete the local (non user-added) entry for (addr, vid) if it points
 * at port @p; takes br->hash_lock itself.
 */
void br_fdb_find_delete_local(struct net_bridge *br,
			      const struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *f;

	spin_lock_bh(&br->hash_lock);
	f = br_fdb_find(br, addr, vid);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
		fdb_delete_local(br, p, f);
	spin_unlock_bh(&br->hash_lock);
}
/* Allocate and insert a new FDB entry. Returns NULL on allocation
 * failure or if an entry with the same key already exists (insert
 * race). Caller must hold br->hash_lock; allocation is GFP_ATOMIC
 * since learning runs in softirq context.
 */
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
					       struct net_bridge_port *source,
					       const unsigned char *addr,
					       __u16 vid,
					       unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;
	int err;

	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
	if (!fdb)
		return NULL;

	memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
	WRITE_ONCE(fdb->dst, source);
	fdb->key.vlan_id = vid;
	fdb->flags = flags;
	fdb->updated = fdb->used = jiffies;
	err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
					    br_fdb_rht_params);
	if (err) {
		kmem_cache_free(br_fdb_cache, fdb);
		return NULL;
	}

	hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);

	return fdb;
}
/* Insert a local (permanent) entry for one of the bridge's own
 * addresses, replacing a previously learned dynamic entry if needed.
 * Caller must hold br->hash_lock. Returns 0, -EINVAL for a bad address,
 * or -ENOMEM.
 */
static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
			 const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	if (!is_valid_ether_addr(addr))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb) {
		/* it is okay to have multiple ports with same
		 * address, just use the first one.
		 */
		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
			return 0;
		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
			source ? source->dev->name : br->dev->name, addr, vid);
		fdb_delete(br, fdb, true);
	}

	fdb = fdb_create(br, source, addr, vid,
			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
	if (!fdb)
		return -ENOMEM;

	fdb_add_hw_addr(br, addr);
	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	return 0;
}
/* Port @p changed its MAC address to @newaddr: drop stale local entries
 * that pointed at the port and insert fresh local entries for the
 * default VLAN and every VLAN configured on the port. Runs under RTNL.
 */
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);
	vg = nbp_vlan_group(p);
	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
			/* delete old one */
			fdb_delete_local(br, p, f);

			/* if this port has no vlan information
			 * configured, we can safely be done at
			 * this point.
			 */
			if (!vg || !vg->num_vlans)
				goto insert;
		}
	}

insert:
	/* insert new address, may fail if invalid address or dup. */
	fdb_add_local(br, p, newaddr, 0);

	if (!vg || !vg->num_vlans)
		goto done;

	/* Now add entries for every VLAN configured on the port.
	 * This function runs under RTNL so the bitmap will not change
	 * from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist)
		fdb_add_local(br, p, newaddr, v->vid);

done:
	spin_unlock_bh(&br->hash_lock);
}
/* The bridge device itself changed its MAC address: refresh the local
 * entries for the old/new address on VLAN 0 and on every VLAN used by
 * the bridge. Runs under RTNL.
 */
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);

	/* If old entry was unassociated with any port, then delete it. */
	f = br_fdb_find(br, br->dev->dev_addr, 0);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
		fdb_delete_local(br, NULL, f);

	fdb_add_local(br, NULL, newaddr, 0);
	vg = br_vlan_group(br);
	if (!vg || !vg->num_vlans)
		goto out;
	/* Now remove and add entries for every VLAN configured on the
	 * bridge. This function runs under RTNL so the bitmap will not
	 * change from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
			fdb_delete_local(br, NULL, f);
		fdb_add_local(br, NULL, newaddr, v->vid);
	}
out:
	spin_unlock_bh(&br->hash_lock);
}
/* Periodic garbage collector: expire aged-out dynamic entries and emit
 * inactivity notifications for entries with BR_FDB_NOTIFY set, then
 * re-arm the delayed work for the next soonest deadline.
 */
void br_fdb_cleanup(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     gc_work.work);
	struct net_bridge_fdb_entry *f = NULL;
	unsigned long delay = hold_time(br);
	unsigned long work_delay = delay;
	unsigned long now = jiffies;

	/* this part is tricky, in order to avoid blocking learning and
	 * consequently forwarding, we rely on rcu to delete objects with
	 * delayed freeing allowing us to continue traversing
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		unsigned long this_timer = f->updated + delay;

		if (test_bit(BR_FDB_STATIC, &f->flags) ||
		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
			/* never aged out, but may need an "inactive"
			 * activity notification
			 */
			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
				if (time_after(this_timer, now))
					work_delay = min(work_delay,
							 this_timer - now);
				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
							   &f->flags))
					fdb_notify(br, f, RTM_NEWNEIGH, false);
			}
			continue;
		}

		if (time_after(this_timer, now)) {
			work_delay = min(work_delay, this_timer - now);
		} else {
			spin_lock_bh(&br->hash_lock);
			/* re-check under the lock: another CPU may have
			 * removed the entry since our RCU lookup
			 */
			if (!hlist_unhashed(&f->fdb_node))
				fdb_delete(br, f, true);
			spin_unlock_bh(&br->hash_lock);
		}
	}
	rcu_read_unlock();

	/* Cleanup minimum 10 milliseconds apart */
	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
  475. static bool __fdb_flush_matches(const struct net_bridge *br,
  476. const struct net_bridge_fdb_entry *f,
  477. const struct net_bridge_fdb_flush_desc *desc)
  478. {
  479. const struct net_bridge_port *dst = READ_ONCE(f->dst);
  480. int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
  481. if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
  482. return false;
  483. if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
  484. return false;
  485. if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
  486. return false;
  487. return true;
  488. }
/* Flush forwarding database entries matching the description */
void br_fdb_flush(struct net_bridge *br,
		  const struct net_bridge_fdb_desc *desc)
{
	struct net_bridge_fdb_entry *f;

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (!__fdb_flush_matches(br, f, desc))
			continue;

		spin_lock_bh(&br->hash_lock);
		/* re-check under the lock: the entry may have been
		 * removed concurrently since the RCU walk saw it
		 */
		if (!hlist_unhashed(&f->fdb_node))
			fdb_delete(br, f, true);
		spin_unlock_bh(&br->hash_lock);
	}
	rcu_read_unlock();
}
  505. static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
  506. {
  507. unsigned long flags = 0;
  508. if (ndm_state & NUD_PERMANENT)
  509. __set_bit(BR_FDB_LOCAL, &flags);
  510. if (ndm_state & NUD_NOARP)
  511. __set_bit(BR_FDB_STATIC, &flags);
  512. return flags;
  513. }
  514. static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
  515. {
  516. unsigned long flags = 0;
  517. if (ndm_flags & NTF_USE)
  518. __set_bit(BR_FDB_ADDED_BY_USER, &flags);
  519. if (ndm_flags & NTF_EXT_LEARNED)
  520. __set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
  521. if (ndm_flags & NTF_OFFLOADED)
  522. __set_bit(BR_FDB_OFFLOADED, &flags);
  523. if (ndm_flags & NTF_STICKY)
  524. __set_bit(BR_FDB_STICKY, &flags);
  525. return flags;
  526. }
/* Validate a user-supplied NDA_IFINDEX for a bulk flush: the device
 * must exist and be either @br itself or one of its ports. Returns 0,
 * -ENODEV, or -EINVAL with an extack message set.
 */
static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
					int ifindex,
					struct netlink_ext_ack *extack)
{
	const struct net_device *dev;

	dev = __dev_get_by_index(dev_net(br->dev), ifindex);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
		return -ENODEV;
	}
	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
		return -EINVAL;
	}
	if (netif_is_bridge_master(dev) && dev != br->dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Flush bridge device does not match target bridge device");
		return -EINVAL;
	}
	if (netif_is_bridge_port(dev)) {
		struct net_bridge_port *p = br_port_get_rtnl(dev);

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	return 0;
}
/* Handle a bulk RTM_DELNEIGH (flush) request: build a flush descriptor
 * from the ndmsg state/flags and optional mask/ifindex attributes, then
 * flush all matching entries. Returns 0 or a negative errno.
 */
int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev, u16 vid,
		       struct netlink_ext_ack *extack)
{
	u8 ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;
	struct net_bridge_fdb_flush_desc desc = { .vlan_id = vid };
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
			return -EINVAL;
		}
		br = p->br;
	}

	if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
		return -EINVAL;
	}
	if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
		return -EINVAL;
	}

	desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
	desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
	if (tb[NDA_NDM_STATE_MASK]) {
		u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);

		desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
	}
	if (tb[NDA_NDM_FLAGS_MASK]) {
		u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);

		desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
	}
	if (tb[NDA_IFINDEX]) {
		int err, ifidx = nla_get_s32(tb[NDA_IFINDEX]);

		err = __fdb_flush_validate_ifindex(br, ifidx, extack);
		if (err)
			return err;
		desc.port_ifindex = ifidx;
	} else if (p) {
		/* flush was invoked with port device and NTF_MASTER */
		desc.port_ifindex = p->dev->ifindex;
	}

	br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
		 desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);

	br_fdb_flush(br, &desc);

	return 0;
}
/* Flush all entries referring to a specific port.
 * if do_all is set also flush static entries
 * if vid is set delete all entries that match the vlan_id
 */
void br_fdb_delete_by_port(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   u16 vid,
			   int do_all)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		if (f->dst != p)
			continue;

		if (!do_all)
			/* keep static entries, external non-offloaded
			 * entries, and entries on other VLANs
			 */
			if (test_bit(BR_FDB_STATIC, &f->flags) ||
			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
			    (vid && f->key.vlan_id != vid))
				continue;

		if (test_bit(BR_FDB_LOCAL, &f->flags))
			fdb_delete_local(br, p, f);
		else
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
 * if an addr is on some other bridge port */
/* Returns 1 iff @addr is known on a forwarding port other than @dev. */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge_port *port;
	int ret;

	rcu_read_lock();
	port = br_port_get_rcu(dev);
	if (!port)
		ret = 0;
	else {
		const struct net_bridge_port *dst = NULL;

		fdb = br_fdb_find_rcu(port->br, addr, 0);
		if (fdb)
			dst = READ_ONCE(fdb->dst);

		ret = dst && dst->dev != dev &&
		      dst->state == BR_STATE_FORWARDING;
	}
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_ATM_LANE */
/*
 * Fill buffer with forwarding table records in
 * the API format.
 */
/* Copy up to @maxnum entries into @buf (array of struct __fdb_entry),
 * skipping the first @skip usable entries. Returns the number written.
 * Used by the legacy ioctl interface.
 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
		   unsigned long maxnum, unsigned long skip)
{
	struct net_bridge_fdb_entry *f;
	struct __fdb_entry *fe = buf;
	int num = 0;

	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (num >= maxnum)
			break;

		if (has_expired(br, f))
			continue;

		/* ignore pseudo entry for local MAC address */
		if (!f->dst)
			continue;

		if (skip) {
			--skip;
			continue;
		}

		/* convert from internal format to API */
		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);

		/* due to ABI compat need to split into hi/lo */
		fe->port_no = f->dst->port_no;
		fe->port_hi = f->dst->port_no >> 8;

		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
		++fe;
		++num;
	}
	rcu_read_unlock();

	return num;
}
/* Add entry for local address of interface */
/* Locked wrapper around fdb_add_local(); see that function for the
 * return values.
 */
int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
		     const unsigned char *addr, u16 vid)
{
	int ret;

	spin_lock_bh(&br->hash_lock);
	ret = fdb_add_local(br, source, addr, vid);
	spin_unlock_bh(&br->hash_lock);
	return ret;
}
  706. /* returns true if the fdb was modified */
  707. static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
  708. {
  709. return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
  710. test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
  711. }
/* Learning path, called from the packet fast path under RCU: refresh or
 * create the entry for (addr, vid) seen on @source. Only takes
 * br->hash_lock for the (rare) creation case.
 */
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
		   const unsigned char *addr, u16 vid, unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	/* some users want to always flood. */
	if (hold_time(br) == 0)
		return;

	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	if (likely(fdb)) {
		/* attempt to update an entry for a local interface */
		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
			if (net_ratelimit())
				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
					source->dev->name, addr, vid);
		} else {
			unsigned long now = jiffies;
			bool fdb_modified = false;

			if (now != fdb->updated) {
				fdb->updated = now;
				fdb_modified = __fdb_mark_active(fdb);
			}

			/* fastpath: update of existing entry */
			if (unlikely(source != READ_ONCE(fdb->dst) &&
				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
				/* the entry moved to another port */
				br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
				WRITE_ONCE(fdb->dst, source);
				fdb_modified = true;
				/* Take over HW learned entry */
				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						      &fdb->flags)))
					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						  &fdb->flags);
			}

			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
			if (unlikely(fdb_modified)) {
				trace_br_fdb_update(br, source, addr, vid, flags);
				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
			}
		}
	} else {
		spin_lock(&br->hash_lock);
		fdb = fdb_create(br, source, addr, vid, flags);
		if (fdb) {
			trace_br_fdb_update(br, source, addr, vid, flags);
			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
		}
		/* else we lose race and someone else inserts
		 * it first, don't bother updating
		 */
		spin_unlock(&br->hash_lock);
	}
}
/* Dump information about entries, in response to GETNEIGH */
/* Walks br->fdb_list under RCU and fills one netlink message per entry.
 * @idx is the running entry counter shared across the dump; cb->args[2]
 * holds the resume point from the previous partial dump.
 * Returns 0 or the last negative error from fdb_fill_info().
 */
int br_fdb_dump(struct sk_buff *skb,
		struct netlink_callback *cb,
		struct net_device *dev,
		struct net_device *filter_dev,
		int *idx)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	/* only the bridge master device owns the fdb table */
	if (!netif_is_bridge_master(dev))
		return err;

	if (!filter_dev) {
		/* include the bridge device's own unicast addresses */
		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
		if (err < 0)
			return err;
	}

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* skip entries already dumped in a previous batch */
		if (*idx < cb->args[2])
			goto skip;
		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
			if (filter_dev != dev)
				goto skip;
			/* !f->dst is a special case for bridge
			 * It means the MAC belongs to the bridge
			 * Therefore need a little more filtering
			 * we only want to dump the !f->dst case
			 */
			if (f->dst)
				goto skip;
		}
		if (!filter_dev && f->dst)
			goto skip;

		err = fdb_fill_info(skb, br, f,
				    NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq,
				    RTM_NEWNEIGH,
				    NLM_F_MULTI);
		if (err < 0)
			break;
skip:
		*idx += 1;
	}
	rcu_read_unlock();

	return err;
}
  812. int br_fdb_get(struct sk_buff *skb,
  813. struct nlattr *tb[],
  814. struct net_device *dev,
  815. const unsigned char *addr,
  816. u16 vid, u32 portid, u32 seq,
  817. struct netlink_ext_ack *extack)
  818. {
  819. struct net_bridge *br = netdev_priv(dev);
  820. struct net_bridge_fdb_entry *f;
  821. int err = 0;
  822. rcu_read_lock();
  823. f = br_fdb_find_rcu(br, addr, vid);
  824. if (!f) {
  825. NL_SET_ERR_MSG(extack, "Fdb entry not found");
  826. err = -ENOENT;
  827. goto errout;
  828. }
  829. err = fdb_fill_info(skb, br, f, portid, seq,
  830. RTM_NEWNEIGH, 0);
  831. errout:
  832. rcu_read_unlock();
  833. return err;
  834. }
  835. /* returns true if the fdb is modified */
  836. static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
  837. {
  838. bool modified = false;
  839. /* allow to mark an entry as inactive, usually done on creation */
  840. if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
  841. !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
  842. modified = true;
  843. if ((notify & FDB_NOTIFY_BIT) &&
  844. !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
  845. /* enabled activity tracking */
  846. modified = true;
  847. } else if (!(notify & FDB_NOTIFY_BIT) &&
  848. test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
  849. /* disabled activity tracking, clear notify state */
  850. clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
  851. modified = true;
  852. }
  853. return modified;
  854. }
/* Update (create or replace) forwarding database entry */
/* Netlink-driven add/replace, called with br->hash_lock held (see
 * __br_fdb_add()).  @source == NULL means the entry points at the bridge
 * device itself.  @flags are the NLM_F_* request flags; @nfea_tb holds
 * the already-parsed NFEA_* extended attributes.
 */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
			 struct nlattr *nfea_tb[])
{
	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
	struct net_bridge_fdb_entry *fdb;
	u16 state = ndm->ndm_state;
	bool modified = false;
	u8 notify = 0;

	/* If the port cannot learn allow only local and static entries */
	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
	    !(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return -EPERM;

	/* entries towards the bridge device must be permanent */
	if (!source && !(state & NUD_PERMANENT)) {
		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
			br->dev->name);
		return -EINVAL;
	}

	/* sticky makes no sense for a local/permanent entry */
	if (is_sticky && (state & NUD_PERMANENT))
		return -EINVAL;

	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
		/* reject unknown bits, and "inactive" without tracking on */
		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
			return -EINVAL;
	}

	fdb = br_fdb_find(br, addr, vid);
	if (fdb == NULL) {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		fdb = fdb_create(br, source, addr, vid, 0);
		if (!fdb)
			return -ENOMEM;

		modified = true;
	} else {
		if (flags & NLM_F_EXCL)
			return -EEXIST;

		/* repoint an existing entry to the requested port */
		if (READ_ONCE(fdb->dst) != source) {
			WRITE_ONCE(fdb->dst, source);
			modified = true;
		}
	}

	/* translate the requested NUD state into LOCAL/STATIC flag bits,
	 * syncing the address to/from the bridge device's HW filter
	 * whenever the STATIC bit actually changes
	 */
	if (fdb_to_nud(br, fdb) != state) {
		if (state & NUD_PERMANENT) {
			set_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else if (state & NUD_NOARP) {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_del_hw_addr(br, addr);
		}

		modified = true;
	}

	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
		change_bit(BR_FDB_STICKY, &fdb->flags);
		modified = true;
	}

	if (fdb_handle_notify(fdb, notify))
		modified = true;

	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

	fdb->used = jiffies;
	if (modified) {
		/* NFEA_DONT_REFRESH lets userspace update flags without
		 * resetting the entry's age
		 */
		if (refresh)
			fdb->updated = jiffies;
		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	}

	return 0;
}
/* Dispatch a single RTM_NEWNEIGH request for one (addr, vid) pair.
 *
 * Three mutually exclusive modes based on ndm_flags:
 *  - NTF_USE: refresh via the normal learning path (port required);
 *  - NTF_EXT_LEARNED: externally (e.g. switchdev) learned entry;
 *  - otherwise: a regular static/permanent entry under the hash lock.
 */
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
			struct net_bridge_port *p, const unsigned char *addr,
			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
			struct netlink_ext_ack *extack)
{
	int err = 0;

	if (ndm->ndm_flags & NTF_USE) {
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
				br->dev->name);
			return -EINVAL;
		}
		if (!nbp_state_should_learn(p))
			return 0;

		/* br_fdb_update() is a softirq-context fast path; mimic that
		 * environment: BHs off plus an RCU read-side section.
		 */
		local_bh_disable();
		rcu_read_lock();
		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
		rcu_read_unlock();
		local_bh_enable();
	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FDB entry towards bridge must be permanent");
			return -EINVAL;
		}
		err = br_fdb_external_learn_add(br, p, addr, vid, true);
	} else {
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
		spin_unlock_bh(&br->hash_lock);
	}

	return err;
}
/* Netlink policy for the NDA_FDB_EXT_ATTRS nest (NFEA_* attributes) */
static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
};
/* Add new permanent fdb entry with RTM_NEWNEIGH */
/* Netlink .ndo_fdb_add entry point.  @dev may be the bridge master or one
 * of its ports.  With vid == 0 and vlans configured, the entry is
 * added/updated for every vlan on the port.  Returns 0 or a negative errno.
 */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr, u16 vid, u16 nlh_flags,
	       struct netlink_ext_ack *extack)
{
	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	int err = 0;

	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);

	/* only states the bridge can represent are accepted */
	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
		return -EINVAL;
	}

	if (is_zero_ether_addr(addr)) {
		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
		return -EINVAL;
	}

	/* resolve the bridge and (optional) port from the target device */
	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (tb[NDA_FDB_EXT_ATTRS]) {
		attr = tb[NDA_FDB_EXT_ATTRS];
		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
				       br_nda_fdb_pol, extack);
		if (err)
			return err;
	} else {
		/* no extended attrs: make the table all-NULL */
		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		/* VID was specified, so use it. */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
				   extack);
	} else {
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
				   extack);
		if (err || !vg || !vg->num_vlans)
			goto out;

		/* We have vlans configured on this port and user didn't
		 * specify a VLAN.  To be nice, add/update entry for every
		 * vlan on this port.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
					   nfea_tb, extack);
			if (err)
				goto out;
		}
	}

out:
	return err;
}
  1041. static int fdb_delete_by_addr_and_port(struct net_bridge *br,
  1042. const struct net_bridge_port *p,
  1043. const u8 *addr, u16 vlan)
  1044. {
  1045. struct net_bridge_fdb_entry *fdb;
  1046. fdb = br_fdb_find(br, addr, vlan);
  1047. if (!fdb || READ_ONCE(fdb->dst) != p)
  1048. return -ENOENT;
  1049. fdb_delete(br, fdb, true);
  1050. return 0;
  1051. }
  1052. static int __br_fdb_delete(struct net_bridge *br,
  1053. const struct net_bridge_port *p,
  1054. const unsigned char *addr, u16 vid)
  1055. {
  1056. int err;
  1057. spin_lock_bh(&br->hash_lock);
  1058. err = fdb_delete_by_addr_and_port(br, p, addr, vid);
  1059. spin_unlock_bh(&br->hash_lock);
  1060. return err;
  1061. }
/* Remove neighbor entry with RTM_DELNEIGH */
/* Netlink .ndo_fdb_del entry point.  When no VID is given, the delete is
 * attempted for vlan 0 and every configured vlan; the per-vlan results
 * are AND-combined so the overall call reports success (0) if at least
 * one delete succeeded.
 */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
		  struct net_device *dev,
		  const unsigned char *addr, u16 vid,
		  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	/* resolve the bridge and (optional) port from the target device */
	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v) {
			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		err = __br_fdb_delete(br, p, addr, vid);
	} else {
		/* seed with -ENOENT (all bits set except bit 0); any delete
		 * returning 0 clears err to 0 through the &=
		 */
		err = -ENOENT;
		err &= __br_fdb_delete(br, p, addr, 0);
		if (!vg || !vg->num_vlans)
			return err;

		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err &= __br_fdb_delete(br, p, addr, v->vid);
		}
	}

	return err;
}
/* Sync all static fdb addresses to port @p's unicast address filter.
 * On failure, every address synced so far is rolled back (removed again)
 * before returning the error.
 */
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f, *tmp;
	int err = 0;

	ASSERT_RTNL();

	/* the key here is that static entries change only under rtnl */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;
		err = dev_uc_add(p->dev, f->key.addr.addr);
		if (err)
			goto rollback;
	}
done:
	rcu_read_unlock();

	return err;
rollback:
	/* undo the additions made before the failing entry @f */
	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
			continue;
		if (tmp == f)
			break;
		dev_uc_del(p->dev, tmp->key.addr.addr);
	}

	goto done;
}
  1135. void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
  1136. {
  1137. struct net_bridge_fdb_entry *f;
  1138. ASSERT_RTNL();
  1139. rcu_read_lock();
  1140. hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
  1141. /* We only care for static entries */
  1142. if (!test_bit(BR_FDB_STATIC, &f->flags))
  1143. continue;
  1144. dev_uc_del(p->dev, f->key.addr.addr);
  1145. }
  1146. rcu_read_unlock();
  1147. }
/* Add or refresh an entry learned externally (e.g. by switchdev HW).
 *
 * @p == NULL targets the bridge device itself (entry becomes LOCAL).
 * @swdev_notify distinguishes a userspace-driven add (true, entry also
 * marked ADDED_BY_USER and switchdev is notified) from one originating
 * in the driver itself (false).
 */
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;
	int err = 0;

	trace_br_fdb_external_learn_add(br, p, addr, vid);

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (!fdb) {
		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);

		if (swdev_notify)
			flags |= BIT(BR_FDB_ADDED_BY_USER);

		if (!p)
			flags |= BIT(BR_FDB_LOCAL);

		fdb = fdb_create(br, p, addr, vid, flags);
		if (!fdb) {
			err = -ENOMEM;
			goto err_unlock;
		}
		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	} else {
		fdb->updated = jiffies;

		/* repoint if the entry moved to another port */
		if (READ_ONCE(fdb->dst) != p) {
			WRITE_ONCE(fdb->dst, p);
			modified = true;
		}

		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
			/* Refresh entry */
			fdb->used = jiffies;
		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
			/* Take over SW learned entry */
			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
			modified = true;
		}

		if (swdev_notify)
			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

		if (!p)
			set_bit(BR_FDB_LOCAL, &fdb->flags);

		if (modified)
			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	}

err_unlock:
	spin_unlock_bh(&br->hash_lock);

	return err;
}
  1195. int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
  1196. const unsigned char *addr, u16 vid,
  1197. bool swdev_notify)
  1198. {
  1199. struct net_bridge_fdb_entry *fdb;
  1200. int err = 0;
  1201. spin_lock_bh(&br->hash_lock);
  1202. fdb = br_fdb_find(br, addr, vid);
  1203. if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
  1204. fdb_delete(br, fdb, swdev_notify);
  1205. else
  1206. err = -ENOENT;
  1207. spin_unlock_bh(&br->hash_lock);
  1208. return err;
  1209. }
  1210. void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
  1211. const unsigned char *addr, u16 vid, bool offloaded)
  1212. {
  1213. struct net_bridge_fdb_entry *fdb;
  1214. spin_lock_bh(&br->hash_lock);
  1215. fdb = br_fdb_find(br, addr, vid);
  1216. if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
  1217. change_bit(BR_FDB_OFFLOADED, &fdb->flags);
  1218. spin_unlock_bh(&br->hash_lock);
  1219. }
  1220. void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
  1221. {
  1222. struct net_bridge_fdb_entry *f;
  1223. struct net_bridge_port *p;
  1224. ASSERT_RTNL();
  1225. p = br_port_get_rtnl(dev);
  1226. if (!p)
  1227. return;
  1228. spin_lock_bh(&p->br->hash_lock);
  1229. hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
  1230. if (f->dst == p && f->key.vlan_id == vid)
  1231. clear_bit(BR_FDB_OFFLOADED, &f->flags);
  1232. }
  1233. spin_unlock_bh(&p->br->hash_lock);
  1234. }
  1235. EXPORT_SYMBOL_GPL(br_fdb_clear_offload);