/*
 * net/bridge/br_mdb.c - bridge multicast database (MDB) netlink handling:
 * dumping MDB entries and router ports, and RTM_NEWMDB/RTM_DELMDB requests.
 */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/err.h>
  3. #include <linux/igmp.h>
  4. #include <linux/kernel.h>
  5. #include <linux/netdevice.h>
  6. #include <linux/rculist.h>
  7. #include <linux/skbuff.h>
  8. #include <linux/if_ether.h>
  9. #include <net/ip.h>
  10. #include <net/netlink.h>
  11. #include <net/switchdev.h>
  12. #if IS_ENABLED(CONFIG_IPV6)
  13. #include <net/ipv6.h>
  14. #include <net/addrconf.h>
  15. #endif
  16. #include "br_private.h"
  17. static bool
  18. br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
  19. unsigned long *timer)
  20. {
  21. *timer = br_timer_value(&pmctx->ip4_mc_router_timer);
  22. return !hlist_unhashed(&pmctx->ip4_rlist);
  23. }
  24. static bool
  25. br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
  26. unsigned long *timer)
  27. {
  28. #if IS_ENABLED(CONFIG_IPV6)
  29. *timer = br_timer_value(&pmctx->ip6_mc_router_timer);
  30. return !hlist_unhashed(&pmctx->ip6_rlist);
  31. #else
  32. *timer = 0;
  33. return false;
  34. #endif
  35. }
  36. static size_t __br_rports_one_size(void)
  37. {
  38. return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
  39. nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
  40. nla_total_size(sizeof(u8)) + /* MDBA_ROUTER_PATTR_TYPE */
  41. nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
  42. nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
  43. nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
  44. }
  45. size_t br_rports_size(const struct net_bridge_mcast *brmctx)
  46. {
  47. struct net_bridge_mcast_port *pmctx;
  48. size_t size = nla_total_size(0); /* MDBA_ROUTER */
  49. rcu_read_lock();
  50. hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
  51. ip4_rlist)
  52. size += __br_rports_one_size();
  53. #if IS_ENABLED(CONFIG_IPV6)
  54. hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
  55. ip6_rlist)
  56. size += __br_rports_one_size();
  57. #endif
  58. rcu_read_unlock();
  59. return size;
  60. }
  61. int br_rports_fill_info(struct sk_buff *skb,
  62. const struct net_bridge_mcast *brmctx)
  63. {
  64. u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
  65. bool have_ip4_mc_rtr, have_ip6_mc_rtr;
  66. unsigned long ip4_timer, ip6_timer;
  67. struct nlattr *nest, *port_nest;
  68. struct net_bridge_port *p;
  69. if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
  70. return 0;
  71. nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
  72. if (nest == NULL)
  73. return -EMSGSIZE;
  74. list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
  75. struct net_bridge_mcast_port *pmctx;
  76. if (vid) {
  77. struct net_bridge_vlan *v;
  78. v = br_vlan_find(nbp_vlan_group(p), vid);
  79. if (!v)
  80. continue;
  81. pmctx = &v->port_mcast_ctx;
  82. } else {
  83. pmctx = &p->multicast_ctx;
  84. }
  85. have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
  86. have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);
  87. if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
  88. continue;
  89. port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
  90. if (!port_nest)
  91. goto fail;
  92. if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
  93. nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
  94. max(ip4_timer, ip6_timer)) ||
  95. nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
  96. p->multicast_ctx.multicast_router) ||
  97. (have_ip4_mc_rtr &&
  98. nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
  99. ip4_timer)) ||
  100. (have_ip6_mc_rtr &&
  101. nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
  102. ip6_timer)) ||
  103. (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
  104. nla_nest_cancel(skb, port_nest);
  105. goto fail;
  106. }
  107. nla_nest_end(skb, port_nest);
  108. }
  109. nla_nest_end(skb, nest);
  110. return 0;
  111. fail:
  112. nla_nest_cancel(skb, nest);
  113. return -EMSGSIZE;
  114. }
  115. static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
  116. {
  117. e->state = flags & MDB_PG_FLAGS_PERMANENT;
  118. e->flags = 0;
  119. if (flags & MDB_PG_FLAGS_OFFLOAD)
  120. e->flags |= MDB_FLAGS_OFFLOAD;
  121. if (flags & MDB_PG_FLAGS_FAST_LEAVE)
  122. e->flags |= MDB_FLAGS_FAST_LEAVE;
  123. if (flags & MDB_PG_FLAGS_STAR_EXCL)
  124. e->flags |= MDB_FLAGS_STAR_EXCL;
  125. if (flags & MDB_PG_FLAGS_BLOCKED)
  126. e->flags |= MDB_FLAGS_BLOCKED;
  127. }
  128. static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
  129. struct nlattr **mdb_attrs)
  130. {
  131. memset(ip, 0, sizeof(struct br_ip));
  132. ip->vid = entry->vid;
  133. ip->proto = entry->addr.proto;
  134. switch (ip->proto) {
  135. case htons(ETH_P_IP):
  136. ip->dst.ip4 = entry->addr.u.ip4;
  137. if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
  138. ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
  139. break;
  140. #if IS_ENABLED(CONFIG_IPV6)
  141. case htons(ETH_P_IPV6):
  142. ip->dst.ip6 = entry->addr.u.ip6;
  143. if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
  144. ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
  145. break;
  146. #endif
  147. default:
  148. ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
  149. }
  150. }
  151. static int __mdb_fill_srcs(struct sk_buff *skb,
  152. struct net_bridge_port_group *p)
  153. {
  154. struct net_bridge_group_src *ent;
  155. struct nlattr *nest, *nest_ent;
  156. if (hlist_empty(&p->src_list))
  157. return 0;
  158. nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
  159. if (!nest)
  160. return -EMSGSIZE;
  161. hlist_for_each_entry_rcu(ent, &p->src_list, node,
  162. lockdep_is_held(&p->key.port->br->multicast_lock)) {
  163. nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
  164. if (!nest_ent)
  165. goto out_cancel_err;
  166. switch (ent->addr.proto) {
  167. case htons(ETH_P_IP):
  168. if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
  169. ent->addr.src.ip4)) {
  170. nla_nest_cancel(skb, nest_ent);
  171. goto out_cancel_err;
  172. }
  173. break;
  174. #if IS_ENABLED(CONFIG_IPV6)
  175. case htons(ETH_P_IPV6):
  176. if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
  177. &ent->addr.src.ip6)) {
  178. nla_nest_cancel(skb, nest_ent);
  179. goto out_cancel_err;
  180. }
  181. break;
  182. #endif
  183. default:
  184. nla_nest_cancel(skb, nest_ent);
  185. continue;
  186. }
  187. if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
  188. br_timer_value(&ent->timer))) {
  189. nla_nest_cancel(skb, nest_ent);
  190. goto out_cancel_err;
  191. }
  192. nla_nest_end(skb, nest_ent);
  193. }
  194. nla_nest_end(skb, nest);
  195. return 0;
  196. out_cancel_err:
  197. nla_nest_cancel(skb, nest);
  198. return -EMSGSIZE;
  199. }
  200. static int __mdb_fill_info(struct sk_buff *skb,
  201. struct net_bridge_mdb_entry *mp,
  202. struct net_bridge_port_group *p)
  203. {
  204. bool dump_srcs_mode = false;
  205. struct timer_list *mtimer;
  206. struct nlattr *nest_ent;
  207. struct br_mdb_entry e;
  208. u8 flags = 0;
  209. int ifindex;
  210. memset(&e, 0, sizeof(e));
  211. if (p) {
  212. ifindex = p->key.port->dev->ifindex;
  213. mtimer = &p->timer;
  214. flags = p->flags;
  215. } else {
  216. ifindex = mp->br->dev->ifindex;
  217. mtimer = &mp->timer;
  218. }
  219. __mdb_entry_fill_flags(&e, flags);
  220. e.ifindex = ifindex;
  221. e.vid = mp->addr.vid;
  222. if (mp->addr.proto == htons(ETH_P_IP)) {
  223. e.addr.u.ip4 = mp->addr.dst.ip4;
  224. #if IS_ENABLED(CONFIG_IPV6)
  225. } else if (mp->addr.proto == htons(ETH_P_IPV6)) {
  226. e.addr.u.ip6 = mp->addr.dst.ip6;
  227. #endif
  228. } else {
  229. ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
  230. e.state = MDB_PG_FLAGS_PERMANENT;
  231. }
  232. e.addr.proto = mp->addr.proto;
  233. nest_ent = nla_nest_start_noflag(skb,
  234. MDBA_MDB_ENTRY_INFO);
  235. if (!nest_ent)
  236. return -EMSGSIZE;
  237. if (nla_put_nohdr(skb, sizeof(e), &e) ||
  238. nla_put_u32(skb,
  239. MDBA_MDB_EATTR_TIMER,
  240. br_timer_value(mtimer)))
  241. goto nest_err;
  242. switch (mp->addr.proto) {
  243. case htons(ETH_P_IP):
  244. dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
  245. if (mp->addr.src.ip4) {
  246. if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
  247. mp->addr.src.ip4))
  248. goto nest_err;
  249. break;
  250. }
  251. break;
  252. #if IS_ENABLED(CONFIG_IPV6)
  253. case htons(ETH_P_IPV6):
  254. dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
  255. if (!ipv6_addr_any(&mp->addr.src.ip6)) {
  256. if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
  257. &mp->addr.src.ip6))
  258. goto nest_err;
  259. break;
  260. }
  261. break;
  262. #endif
  263. default:
  264. ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
  265. }
  266. if (p) {
  267. if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
  268. goto nest_err;
  269. if (dump_srcs_mode &&
  270. (__mdb_fill_srcs(skb, p) ||
  271. nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
  272. p->filter_mode)))
  273. goto nest_err;
  274. }
  275. nla_nest_end(skb, nest_ent);
  276. return 0;
  277. nest_err:
  278. nla_nest_cancel(skb, nest_ent);
  279. return -EMSGSIZE;
  280. }
  281. static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
  282. struct net_device *dev)
  283. {
  284. int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
  285. struct net_bridge *br = netdev_priv(dev);
  286. struct net_bridge_mdb_entry *mp;
  287. struct nlattr *nest, *nest2;
  288. if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
  289. return 0;
  290. nest = nla_nest_start_noflag(skb, MDBA_MDB);
  291. if (nest == NULL)
  292. return -EMSGSIZE;
  293. hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
  294. struct net_bridge_port_group *p;
  295. struct net_bridge_port_group __rcu **pp;
  296. if (idx < s_idx)
  297. goto skip;
  298. nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
  299. if (!nest2) {
  300. err = -EMSGSIZE;
  301. break;
  302. }
  303. if (!s_pidx && mp->host_joined) {
  304. err = __mdb_fill_info(skb, mp, NULL);
  305. if (err) {
  306. nla_nest_cancel(skb, nest2);
  307. break;
  308. }
  309. }
  310. for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
  311. pp = &p->next) {
  312. if (!p->key.port)
  313. continue;
  314. if (pidx < s_pidx)
  315. goto skip_pg;
  316. err = __mdb_fill_info(skb, mp, p);
  317. if (err) {
  318. nla_nest_end(skb, nest2);
  319. goto out;
  320. }
  321. skip_pg:
  322. pidx++;
  323. }
  324. pidx = 0;
  325. s_pidx = 0;
  326. nla_nest_end(skb, nest2);
  327. skip:
  328. idx++;
  329. }
  330. out:
  331. cb->args[1] = idx;
  332. cb->args[2] = pidx;
  333. nla_nest_end(skb, nest);
  334. return err;
  335. }
  336. static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
  337. struct netlink_ext_ack *extack)
  338. {
  339. struct br_port_msg *bpm;
  340. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
  341. NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
  342. return -EINVAL;
  343. }
  344. bpm = nlmsg_data(nlh);
  345. if (bpm->ifindex) {
  346. NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
  347. return -EINVAL;
  348. }
  349. if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
  350. NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
  351. return -EINVAL;
  352. }
  353. return 0;
  354. }
  355. static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
  356. {
  357. struct net_device *dev;
  358. struct net *net = sock_net(skb->sk);
  359. struct nlmsghdr *nlh = NULL;
  360. int idx = 0, s_idx;
  361. if (cb->strict_check) {
  362. int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);
  363. if (err < 0)
  364. return err;
  365. }
  366. s_idx = cb->args[0];
  367. rcu_read_lock();
  368. cb->seq = net->dev_base_seq;
  369. for_each_netdev_rcu(net, dev) {
  370. if (netif_is_bridge_master(dev)) {
  371. struct net_bridge *br = netdev_priv(dev);
  372. struct br_port_msg *bpm;
  373. if (idx < s_idx)
  374. goto skip;
  375. nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
  376. cb->nlh->nlmsg_seq, RTM_GETMDB,
  377. sizeof(*bpm), NLM_F_MULTI);
  378. if (nlh == NULL)
  379. break;
  380. bpm = nlmsg_data(nlh);
  381. memset(bpm, 0, sizeof(*bpm));
  382. bpm->ifindex = dev->ifindex;
  383. if (br_mdb_fill_info(skb, cb, dev) < 0)
  384. goto out;
  385. if (br_rports_fill_info(skb, &br->multicast_ctx) < 0)
  386. goto out;
  387. cb->args[1] = 0;
  388. nlmsg_end(skb, nlh);
  389. skip:
  390. idx++;
  391. }
  392. }
  393. out:
  394. if (nlh)
  395. nlmsg_end(skb, nlh);
  396. rcu_read_unlock();
  397. cb->args[0] = idx;
  398. return skb->len;
  399. }
  400. static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
  401. struct net_device *dev,
  402. struct net_bridge_mdb_entry *mp,
  403. struct net_bridge_port_group *pg,
  404. int type)
  405. {
  406. struct nlmsghdr *nlh;
  407. struct br_port_msg *bpm;
  408. struct nlattr *nest, *nest2;
  409. nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
  410. if (!nlh)
  411. return -EMSGSIZE;
  412. bpm = nlmsg_data(nlh);
  413. memset(bpm, 0, sizeof(*bpm));
  414. bpm->family = AF_BRIDGE;
  415. bpm->ifindex = dev->ifindex;
  416. nest = nla_nest_start_noflag(skb, MDBA_MDB);
  417. if (nest == NULL)
  418. goto cancel;
  419. nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
  420. if (nest2 == NULL)
  421. goto end;
  422. if (__mdb_fill_info(skb, mp, pg))
  423. goto end;
  424. nla_nest_end(skb, nest2);
  425. nla_nest_end(skb, nest);
  426. nlmsg_end(skb, nlh);
  427. return 0;
  428. end:
  429. nla_nest_end(skb, nest);
  430. cancel:
  431. nlmsg_cancel(skb, nlh);
  432. return -EMSGSIZE;
  433. }
  434. static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
  435. {
  436. size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
  437. nla_total_size(sizeof(struct br_mdb_entry)) +
  438. nla_total_size(sizeof(u32));
  439. struct net_bridge_group_src *ent;
  440. size_t addr_size = 0;
  441. if (!pg)
  442. goto out;
  443. /* MDBA_MDB_EATTR_RTPROT */
  444. nlmsg_size += nla_total_size(sizeof(u8));
  445. switch (pg->key.addr.proto) {
  446. case htons(ETH_P_IP):
  447. /* MDBA_MDB_EATTR_SOURCE */
  448. if (pg->key.addr.src.ip4)
  449. nlmsg_size += nla_total_size(sizeof(__be32));
  450. if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
  451. goto out;
  452. addr_size = sizeof(__be32);
  453. break;
  454. #if IS_ENABLED(CONFIG_IPV6)
  455. case htons(ETH_P_IPV6):
  456. /* MDBA_MDB_EATTR_SOURCE */
  457. if (!ipv6_addr_any(&pg->key.addr.src.ip6))
  458. nlmsg_size += nla_total_size(sizeof(struct in6_addr));
  459. if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
  460. goto out;
  461. addr_size = sizeof(struct in6_addr);
  462. break;
  463. #endif
  464. }
  465. /* MDBA_MDB_EATTR_GROUP_MODE */
  466. nlmsg_size += nla_total_size(sizeof(u8));
  467. /* MDBA_MDB_EATTR_SRC_LIST nested attr */
  468. if (!hlist_empty(&pg->src_list))
  469. nlmsg_size += nla_total_size(0);
  470. hlist_for_each_entry(ent, &pg->src_list, node) {
  471. /* MDBA_MDB_SRCLIST_ENTRY nested attr +
  472. * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
  473. */
  474. nlmsg_size += nla_total_size(0) +
  475. nla_total_size(addr_size) +
  476. nla_total_size(sizeof(u32));
  477. }
  478. out:
  479. return nlmsg_size;
  480. }
  481. void br_mdb_notify(struct net_device *dev,
  482. struct net_bridge_mdb_entry *mp,
  483. struct net_bridge_port_group *pg,
  484. int type)
  485. {
  486. struct net *net = dev_net(dev);
  487. struct sk_buff *skb;
  488. int err = -ENOBUFS;
  489. br_switchdev_mdb_notify(dev, mp, pg, type);
  490. skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
  491. if (!skb)
  492. goto errout;
  493. err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
  494. if (err < 0) {
  495. kfree_skb(skb);
  496. goto errout;
  497. }
  498. rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
  499. return;
  500. errout:
  501. rtnl_set_sk_err(net, RTNLGRP_MDB, err);
  502. }
  503. static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
  504. struct net_device *dev,
  505. int ifindex, u16 vid, u32 pid,
  506. u32 seq, int type, unsigned int flags)
  507. {
  508. struct nlattr *nest, *port_nest;
  509. struct br_port_msg *bpm;
  510. struct nlmsghdr *nlh;
  511. nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
  512. if (!nlh)
  513. return -EMSGSIZE;
  514. bpm = nlmsg_data(nlh);
  515. memset(bpm, 0, sizeof(*bpm));
  516. bpm->family = AF_BRIDGE;
  517. bpm->ifindex = dev->ifindex;
  518. nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
  519. if (!nest)
  520. goto cancel;
  521. port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
  522. if (!port_nest)
  523. goto end;
  524. if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
  525. nla_nest_cancel(skb, port_nest);
  526. goto end;
  527. }
  528. if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
  529. nla_nest_cancel(skb, port_nest);
  530. goto end;
  531. }
  532. nla_nest_end(skb, port_nest);
  533. nla_nest_end(skb, nest);
  534. nlmsg_end(skb, nlh);
  535. return 0;
  536. end:
  537. nla_nest_end(skb, nest);
  538. cancel:
  539. nlmsg_cancel(skb, nlh);
  540. return -EMSGSIZE;
  541. }
  542. static inline size_t rtnl_rtr_nlmsg_size(void)
  543. {
  544. return NLMSG_ALIGN(sizeof(struct br_port_msg))
  545. + nla_total_size(sizeof(__u32))
  546. + nla_total_size(sizeof(u16));
  547. }
  548. void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
  549. int type)
  550. {
  551. struct net *net = dev_net(dev);
  552. struct sk_buff *skb;
  553. int err = -ENOBUFS;
  554. int ifindex;
  555. u16 vid;
  556. ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
  557. vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
  558. 0;
  559. skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
  560. if (!skb)
  561. goto errout;
  562. err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
  563. NTF_SELF);
  564. if (err < 0) {
  565. kfree_skb(skb);
  566. goto errout;
  567. }
  568. rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
  569. return;
  570. errout:
  571. rtnl_set_sk_err(net, RTNLGRP_MDB, err);
  572. }
  573. static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
  574. struct netlink_ext_ack *extack)
  575. {
  576. if (entry->ifindex == 0) {
  577. NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
  578. return false;
  579. }
  580. if (entry->addr.proto == htons(ETH_P_IP)) {
  581. if (!ipv4_is_multicast(entry->addr.u.ip4)) {
  582. NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
  583. return false;
  584. }
  585. if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
  586. NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
  587. return false;
  588. }
  589. #if IS_ENABLED(CONFIG_IPV6)
  590. } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
  591. if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
  592. NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
  593. return false;
  594. }
  595. #endif
  596. } else if (entry->addr.proto == 0) {
  597. /* L2 mdb */
  598. if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
  599. NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
  600. return false;
  601. }
  602. } else {
  603. NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
  604. return false;
  605. }
  606. if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
  607. NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
  608. return false;
  609. }
  610. if (entry->vid >= VLAN_VID_MASK) {
  611. NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
  612. return false;
  613. }
  614. return true;
  615. }
  616. static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
  617. struct netlink_ext_ack *extack)
  618. {
  619. switch (proto) {
  620. case htons(ETH_P_IP):
  621. if (nla_len(attr) != sizeof(struct in_addr)) {
  622. NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
  623. return false;
  624. }
  625. if (ipv4_is_multicast(nla_get_in_addr(attr))) {
  626. NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
  627. return false;
  628. }
  629. break;
  630. #if IS_ENABLED(CONFIG_IPV6)
  631. case htons(ETH_P_IPV6): {
  632. struct in6_addr src;
  633. if (nla_len(attr) != sizeof(struct in6_addr)) {
  634. NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
  635. return false;
  636. }
  637. src = nla_get_in6_addr(attr);
  638. if (ipv6_addr_is_multicast(&src)) {
  639. NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
  640. return false;
  641. }
  642. break;
  643. }
  644. #endif
  645. default:
  646. NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
  647. return false;
  648. }
  649. return true;
  650. }
  651. static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
  652. [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
  653. sizeof(struct in_addr),
  654. sizeof(struct in6_addr)),
  655. };
  656. static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
  657. struct net_device **pdev, struct br_mdb_entry **pentry,
  658. struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
  659. {
  660. struct net *net = sock_net(skb->sk);
  661. struct br_mdb_entry *entry;
  662. struct br_port_msg *bpm;
  663. struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
  664. struct net_device *dev;
  665. int err;
  666. err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
  667. MDBA_SET_ENTRY_MAX, NULL, NULL);
  668. if (err < 0)
  669. return err;
  670. bpm = nlmsg_data(nlh);
  671. if (bpm->ifindex == 0) {
  672. NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
  673. return -EINVAL;
  674. }
  675. dev = __dev_get_by_index(net, bpm->ifindex);
  676. if (dev == NULL) {
  677. NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
  678. return -ENODEV;
  679. }
  680. if (!netif_is_bridge_master(dev)) {
  681. NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
  682. return -EOPNOTSUPP;
  683. }
  684. *pdev = dev;
  685. if (!tb[MDBA_SET_ENTRY]) {
  686. NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
  687. return -EINVAL;
  688. }
  689. if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
  690. NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
  691. return -EINVAL;
  692. }
  693. entry = nla_data(tb[MDBA_SET_ENTRY]);
  694. if (!is_valid_mdb_entry(entry, extack))
  695. return -EINVAL;
  696. *pentry = entry;
  697. if (tb[MDBA_SET_ENTRY_ATTRS]) {
  698. err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
  699. tb[MDBA_SET_ENTRY_ATTRS],
  700. br_mdbe_attrs_pol, extack);
  701. if (err)
  702. return err;
  703. if (mdb_attrs[MDBE_ATTR_SOURCE] &&
  704. !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
  705. entry->addr.proto, extack))
  706. return -EINVAL;
  707. } else {
  708. memset(mdb_attrs, 0,
  709. sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
  710. }
  711. return 0;
  712. }
  713. static struct net_bridge_mcast *
  714. __br_mdb_choose_context(struct net_bridge *br,
  715. const struct br_mdb_entry *entry,
  716. struct netlink_ext_ack *extack)
  717. {
  718. struct net_bridge_mcast *brmctx = NULL;
  719. struct net_bridge_vlan *v;
  720. if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
  721. brmctx = &br->multicast_ctx;
  722. goto out;
  723. }
  724. if (!entry->vid) {
  725. NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
  726. goto out;
  727. }
  728. v = br_vlan_find(br_vlan_group(br), entry->vid);
  729. if (!v) {
  730. NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
  731. goto out;
  732. }
  733. if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
  734. NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
  735. goto out;
  736. }
  737. brmctx = &v->br_mcast_ctx;
  738. out:
  739. return brmctx;
  740. }
  741. static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
  742. struct br_mdb_entry *entry,
  743. struct nlattr **mdb_attrs,
  744. struct netlink_ext_ack *extack)
  745. {
  746. struct net_bridge_mdb_entry *mp, *star_mp;
  747. struct net_bridge_port_group __rcu **pp;
  748. struct net_bridge_port_group *p;
  749. struct net_bridge_mcast *brmctx;
  750. struct br_ip group, star_group;
  751. unsigned long now = jiffies;
  752. unsigned char flags = 0;
  753. u8 filter_mode;
  754. int err;
  755. __mdb_entry_to_br_ip(entry, &group, mdb_attrs);
  756. brmctx = __br_mdb_choose_context(br, entry, extack);
  757. if (!brmctx)
  758. return -EINVAL;
  759. /* host join errors which can happen before creating the group */
  760. if (!port && !br_group_is_l2(&group)) {
  761. /* don't allow any flags for host-joined IP groups */
  762. if (entry->state) {
  763. NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
  764. return -EINVAL;
  765. }
  766. if (!br_multicast_is_star_g(&group)) {
  767. NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
  768. return -EINVAL;
  769. }
  770. }
  771. if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
  772. NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
  773. return -EINVAL;
  774. }
  775. mp = br_mdb_ip_get(br, &group);
  776. if (!mp) {
  777. mp = br_multicast_new_group(br, &group);
  778. err = PTR_ERR_OR_ZERO(mp);
  779. if (err)
  780. return err;
  781. }
  782. /* host join */
  783. if (!port) {
  784. if (mp->host_joined) {
  785. NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
  786. return -EEXIST;
  787. }
  788. br_multicast_host_join(brmctx, mp, false);
  789. br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
  790. return 0;
  791. }
  792. for (pp = &mp->ports;
  793. (p = mlock_dereference(*pp, br)) != NULL;
  794. pp = &p->next) {
  795. if (p->key.port == port) {
  796. NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
  797. return -EEXIST;
  798. }
  799. if ((unsigned long)p->key.port < (unsigned long)port)
  800. break;
  801. }
  802. filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
  803. MCAST_INCLUDE;
  804. if (entry->state == MDB_PERMANENT)
  805. flags |= MDB_PG_FLAGS_PERMANENT;
  806. p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
  807. filter_mode, RTPROT_STATIC);
  808. if (unlikely(!p)) {
  809. NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
  810. return -ENOMEM;
  811. }
  812. rcu_assign_pointer(*pp, p);
  813. if (entry->state == MDB_TEMPORARY)
  814. mod_timer(&p->timer,
  815. now + brmctx->multicast_membership_interval);
  816. br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
  817. /* if we are adding a new EXCLUDE port group (*,G) it needs to be also
  818. * added to all S,G entries for proper replication, if we are adding
  819. * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
  820. * added to it for proper replication
  821. */
  822. if (br_multicast_should_handle_mode(brmctx, group.proto)) {
  823. switch (filter_mode) {
  824. case MCAST_EXCLUDE:
  825. br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
  826. break;
  827. case MCAST_INCLUDE:
  828. star_group = p->key.addr;
  829. memset(&star_group.src, 0, sizeof(star_group.src));
  830. star_mp = br_mdb_ip_get(br, &star_group);
  831. if (star_mp)
  832. br_multicast_sg_add_exclude_ports(star_mp, p);
  833. break;
  834. }
  835. }
  836. return 0;
  837. }
  838. static int __br_mdb_add(struct net *net, struct net_bridge *br,
  839. struct net_bridge_port *p,
  840. struct br_mdb_entry *entry,
  841. struct nlattr **mdb_attrs,
  842. struct netlink_ext_ack *extack)
  843. {
  844. int ret;
  845. spin_lock_bh(&br->multicast_lock);
  846. ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
  847. spin_unlock_bh(&br->multicast_lock);
  848. return ret;
  849. }
/* RTM_NEWMDB netlink handler: validate the request and add an MDB entry.
 *
 * Parses the message, checks that the bridge is running with multicast
 * processing enabled, resolves and validates the target port (unless the
 * entry targets the bridge device itself), then adds the group — once per
 * configured VLAN when VLAN filtering is on and no VID was given.
 *
 * Returns 0 on success or a negative errno; extack carries the reason.
 */
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	/* Extract the bridge device, the entry and any MDBE_ATTR_* nest. */
	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		/* Entry targets a bridge port: it must exist, be a port of
		 * THIS bridge, and (unless permanent) not be disabled.
		 */
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED && entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		/* Entry targets the bridge device itself (host join). */
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 * Note: entry->vid is overwritten per iteration; the loop stops
	 * at the first failure.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}
/* Delete one MDB entry under the bridge multicast lock.
 *
 * Looks up the group for @entry; if the entry's ifindex is the bridge
 * device itself, performs a host leave, otherwise removes the matching
 * port group from the group's port list.
 *
 * Returns 0 on success, -EINVAL if the bridge is down, multicast is
 * disabled, or no matching group/port was found.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	/* Build the br_ip lookup key from the netlink entry/attributes. */
	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		/* No port groups remain: fire the entry timer now so the
		 * empty group entry gets cleaned up.
		 */
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	/* Walk the port-group list (lock-protected dereference) and delete
	 * the first group whose port matches the requested ifindex.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
/* RTM_DELMDB netlink handler: validate the request and delete MDB entries.
 *
 * Mirrors br_mdb_add(): parses the message, resolves/validates the target
 * port (unless the entry targets the bridge device), then deletes the
 * entry — once per configured VLAN when VLAN filtering is on and no VID
 * was given.
 *
 * Returns 0 on success or a negative errno.
 */
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		/* Entry targets a bridge port: it must exist and belong to
		 * this bridge.
		 */
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 * Note: the loop visits every VLAN regardless of per-VLAN failures;
	 * only the last iteration's result is returned.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}
/* Register the PF_BRIDGE rtnetlink handlers for the MDB message family:
 * dump for RTM_GETMDB, doit for RTM_NEWMDB/RTM_DELMDB.
 *
 * NOTE(review): rtnl_register_module() return values are ignored here —
 * registration failures go undetected; verify this matches the module's
 * error-handling policy.
 */
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}
/* Unregister the PF_BRIDGE rtnetlink handlers installed by br_mdb_init(). */
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}