// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger	<[email protected]>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_cfm.h"
#include "br_private_tunnel.h"
#include "br_private_mcast_eht.h"
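
/* Count how many IFLA_BRIDGE_VLAN_INFO attributes the compressed dump needs:
 * runs of consecutive VIDs with identical flags collapse into a
 * RANGE_BEGIN/RANGE_END pair, everything else costs a single entry.
 */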
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}
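
/* Estimate the IFLA_AF_SPEC payload (VLAN infos, tunnel info, MST and CFM
 * status) for the requested filter mask so the netlink skb can be sized
 * up front.
 */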
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br = NULL;
	u32 num_cfm_peer_mep_infos;
	u32 num_cfm_mep_infos;
	size_t vinfo_sz = 0;
	int num_vlan_infos;

	rcu_read_lock();
	if (netif_is_bridge_port(dev)) {
		p = br_port_get_check_rcu(dev);
		if (p)
			vg = nbp_vlan_group_rcu(p);
	} else if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	if (p && vg && (filter_mask & RTEXT_FILTER_MST))
		vinfo_sz += br_mst_info_size(vg);

	if (!(filter_mask & RTEXT_FILTER_CFM_STATUS))
		return vinfo_sz;

	if (!br)
		return vinfo_sz;

	/* CFM status info must be added */
	br_cfm_mep_count(br, &num_cfm_mep_infos);
	br_cfm_peer_mep_count(br, &num_cfm_peer_mep_infos);

	vinfo_sz += nla_total_size(0);	/* IFLA_BRIDGE_CFM */
	/* For each status struct the MEP instance (u32) is added */
	/* MEP instance (u32) + br_cfm_mep_status */
	vinfo_sz += num_cfm_mep_infos *
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_INSTANCE */
		    (nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_OPCODE_UNEXP_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_VERSION_UNEXP_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_MEP_STATUS_RX_LEVEL_LOW_SEEN */
		    + nla_total_size(sizeof(u32)));

	/* MEP instance (u32) + br_cfm_cc_peer_status */
	vinfo_sz += num_cfm_peer_mep_infos *
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_INSTANCE */
		    (nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PEER_MEPID */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_CCM_DEFECT */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_RDI */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_PORT_TLV_VALUE */
		    + nla_total_size(sizeof(u8))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_IF_TLV_VALUE */
		    + nla_total_size(sizeof(u8))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_TLV_SEEN */
		    + nla_total_size(sizeof(u32))
		    /* IFLA_BRIDGE_CFM_CC_PEER_STATUS_SEQ_UNEXP_SEEN */
		    + nla_total_size(sizeof(u32)));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(1)	/* IFLA_BRPORT_LOCKED */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_RING_OPEN */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MRP_IN_OPEN */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT */
		+ nla_total_size(sizeof(u32))	/* IFLA_BRPORT_MCAST_EHT_HOSTS_CNT */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
		+ nla_total_size(4)	/* IFLA_MASTER */
		+ nla_total_size(4)	/* IFLA_MTU */
		+ nla_total_size(4)	/* IFLA_LINK */
		+ nla_total_size(1)	/* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size())	/* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask))	/* IFLA_AF_SPEC */
		+ nla_total_size(4);	/* IFLA_BRPORT_BACKUP_PORT */
}

static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags &
							  BR_MRP_LOST_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
		       !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_ctx.multicast_router) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
			p->multicast_eht_hosts_limit) ||
	    nla_put_u32(skb, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
			p->multicast_eht_hosts_cnt))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	return 0;
}
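
/* Emit either a single bridge_vlan_info attribute or a RANGE_BEGIN/RANGE_END
 * pair when vid_start..vid_end covers more than one VLAN.
 */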
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}
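
/* Uncompressed dump: one IFLA_BRIDGE_VLAN_INFO attribute per VLAN. */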
static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev,
			  bool getlink)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct nlattr *af = NULL;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, IFLA_PROTINFO);
		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	if (filter_mask & (RTEXT_FILTER_BRVLAN |
			   RTEXT_FILTER_BRVLAN_COMPRESSED |
			   RTEXT_FILTER_MRP |
			   RTEXT_FILTER_CFM_CONFIG |
			   RTEXT_FILTER_CFM_STATUS |
			   RTEXT_FILTER_MST)) {
		af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
		if (!af)
			goto nla_put_failure;
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
	}

	if (filter_mask & RTEXT_FILTER_MRP) {
		int err;

		if (!br_mrp_enabled(br) || port)
			goto done;

		rcu_read_lock();
		err = br_mrp_fill_info(skb, br);
		rcu_read_unlock();

		if (err)
			goto nla_put_failure;
	}

	if (filter_mask & (RTEXT_FILTER_CFM_CONFIG | RTEXT_FILTER_CFM_STATUS)) {
		struct nlattr *cfm_nest = NULL;
		int err;

		if (!br_cfm_created(br) || port)
			goto done;

		cfm_nest = nla_nest_start(skb, IFLA_BRIDGE_CFM);
		if (!cfm_nest)
			goto nla_put_failure;

		if (filter_mask & RTEXT_FILTER_CFM_CONFIG) {
			rcu_read_lock();
			err = br_cfm_config_fill_info(skb, br);
			rcu_read_unlock();

			if (err)
				goto nla_put_failure;
		}

		if (filter_mask & RTEXT_FILTER_CFM_STATUS) {
			rcu_read_lock();
			err = br_cfm_status_fill_info(skb, br, getlink);
			rcu_read_unlock();

			if (err)
				goto nla_put_failure;
		}

		nla_nest_end(skb, cfm_nest);
	}

	if ((filter_mask & RTEXT_FILTER_MST) &&
	    br_opt_get(br, BROPT_MST_ENABLED) && port) {
		const struct net_bridge_vlan_group *vg = nbp_vlan_group(port);
		struct nlattr *mst_nest;
		int err;

		if (!vg || !vg->num_vlans)
			goto done;

		mst_nest = nla_nest_start(skb, IFLA_BRIDGE_MST);
		if (!mst_nest)
			goto nla_put_failure;

		err = br_mst_fill_info(skb, vg);
		if (err)
			goto nla_put_failure;

		nla_nest_end(skb, mst_nest);
	}

done:
	if (af) {
		if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
			nla_nest_end(skb, af);
		else
			nla_nest_cancel(skb, af);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

void br_info_notify(int event, const struct net_bridge *br,
		    const struct net_bridge_port *port, u32 filter)
{
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev, false);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	return br_info_notify(event, br, port, filter);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
	    !(filter_mask & RTEXT_FILTER_MRP) &&
	    !(filter_mask & RTEXT_FILTER_CFM_CONFIG) &&
	    !(filter_mask & RTEXT_FILTER_CFM_STATUS))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev, true);
}

static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed,
			struct netlink_ext_ack *extack)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change, extack);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change, extack);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}
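
/* Handle one bridge_vlan_info attribute: either remember the start of a
 * range, or apply the (possibly ranged) add/delete and send per-range
 * RTM_NEWVLAN/RTM_DELVLAN notifications for the VLANs that actually changed.
 */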
int br_process_vlan_info(struct net_bridge *br,
			 struct net_bridge_port *p, int cmd,
			 struct bridge_vlan_info *vinfo_curr,
			 struct bridge_vlan_info **vinfo_last,
			 bool *changed,
			 struct netlink_ext_ack *extack)
{
	int err, rtm_cmd;

	if (!br_vlan_valid_id(vinfo_curr->vid, extack))
		return -EINVAL;

	/* needed for vlan-only NEWVLAN/DELVLAN notifications */
	rtm_cmd = br_afspec_cmd_to_rtm(cmd);

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;
		*vinfo_last = vinfo_curr;

		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, v_change_start = 0;

		if (!br_vlan_valid_range(vinfo_curr, *vinfo_last, extack))
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			bool curr_change = false;

			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, &curr_change,
					   extack);
			if (err)
				break;
			if (curr_change) {
				*changed = curr_change;
				if (!v_change_start)
					v_change_start = v;
			} else {
				/* nothing to notify yet */
				if (!v_change_start)
					continue;
				br_vlan_notify(br, p, v_change_start,
					       v - 1, rtm_cmd);
				v_change_start = 0;
			}
			cond_resched();
		}
		/* v_change_start is set only if the last/whole range changed */
		if (v_change_start)
			br_vlan_notify(br, p, v_change_start,
				       v - 1, rtm_cmd);

		*vinfo_last = NULL;

		return err;
	}

	err = br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
	if (*changed)
		br_vlan_notify(br, p, vinfo_curr->vid, 0, rtm_cmd);

	return err;
}
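
/* Walk the nested IFLA_AF_SPEC attributes and dispatch the VLAN, VLAN tunnel,
 * MRP, CFM and MST sub-requests.
 */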
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed,
		     struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed,
						   extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_MRP:
			err = br_mrp_parse(br, p, attr, cmd, extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_CFM:
			err = br_cfm_parse(br, p, attr, cmd, extack);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_MST:
			if (!p) {
				NL_SET_ERR_MSG(extack,
					       "MST states can only be set on bridge ports");
				return -EINVAL;
			}

			if (cmd != RTM_SETLINK) {
				NL_SET_ERR_MSG(extack,
					       "MST states can only be set through RTM_SETLINK");
				return -EINVAL;
			}

			err = br_mst_process(p, attr, extack);
			if (err)
				return err;
			break;
		}
	}

	return err;
}
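
/* Policy for the IFLA_BRPORT_* attributes nested in IFLA_PROTINFO. */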
static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE] = { .type = NLA_U8 },
	[IFLA_BRPORT_COST] = { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BRPORT_MODE] = { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 },
	[IFLA_BRPORT_LOCKED] = { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
	[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear port flags based on the attribute */
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			     int attrtype, unsigned long mask)
{
	if (!tb[attrtype])
		return;

	if (nla_get_u8(tb[attrtype]))
		p->flags |= mask;
	else
		p->flags &= ~mask;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
		      struct netlink_ext_ack *extack)
{
	unsigned long old_flags, changed_mask;
	bool br_vlan_tunnel_old;
	int err;

	old_flags = p->flags;
	br_vlan_tunnel_old = (old_flags & BR_VLAN_TUNNEL) ? true : false;

	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
			 BR_MULTICAST_FAST_LEAVE);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST,
			 BR_MULTICAST_TO_UNICAST);
	br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
	br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED);

	changed_mask = old_flags ^ p->flags;

	err = br_switchdev_set_port_flag(p, p->flags, changed_mask, extack);
	if (err) {
		p->flags = old_flags;
		return err;
	}

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	br_port_flags_change(p, changed_mask);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(&p->multicast_ctx,
						   mcast_router);
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]) {
		u32 hlimit;

		hlimit = nla_get_u32(tb[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT]);
		err = br_multicast_eht_set_hosts_limit(p, hlimit);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
							  protinfo,
							  br_port_policy,
							  NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb, extack);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !netif_is_bridge_master(dev))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL] &&
	    !eth_type_vlan(nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])))
		return -EPROTONOSUPPORT;

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data, extack);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}
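
/* Policy for the IFLA_BR_* bridge options carried in IFLA_INFO_DATA. */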
static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
	[IFLA_BR_MULTI_BOOLOPT] =
		NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)),
};

static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		err = br_stp_set_enabled(br, stp_enabled, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = br_vlan_filter_toggle(br, vlan_filter, extack);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
		__u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);

		err = br_vlan_set_stats_per_port(br, per_port);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;

		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH]) {
		struct net_bridge_fdb_flush_desc desc = {
			.flags_mask = BIT(BR_FDB_STATIC)
		};

		br_fdb_flush(br, &desc);
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(&br->multicast_ctx,
					      multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping, extack);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(&br->multicast_ctx,
					       mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
		br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
			RHT_ELASTICITY);

	if (data[IFLA_BR_MCAST_HASH_MAX])
		br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_ctx.multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_ctx.multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br_multicast_set_query_intvl(&br->multicast_ctx, val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(&br->multicast_ctx,
						    igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(&br->multicast_ctx,
						   mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
	}
#endif

	if (data[IFLA_BR_MULTI_BOOLOPT]) {
		struct br_boolopt_multi *bm;

		bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
		err = br_boolopt_multi_toggle(br, bm, extack);
		if (err)
			return err;
	}

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = br_changelink(dev, tb, data, extack);
	if (err)
		br_dev_delete(dev, NULL);

	return err;
}
static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
	       br_multicast_querier_state_size() + /* IFLA_BR_MCAST_QUERIER_STATE */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
	       0;
}
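
/* Dump the bridge-level IFLA_BR_* attributes (STP state and timers,
 * VLAN, multicast and netfilter settings) into a link message.
 * Returns -EMSGSIZE as soon as the skb runs out of room.
 */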
static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	struct br_boolopt_multi bm;
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	br_boolopt_multi_get(br, &bm);
	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
	    nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER,
		       br->multicast_ctx.multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
		       br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
		       br->multicast_ctx.multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_ctx.multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_ctx.multicast_igmp_version) ||
	    br_multicast_dump_querier_state(skb, &br->multicast_ctx,
					    IFLA_BR_MCAST_QUERIER_STATE))
		return -EMSGSIZE;

#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_ctx.multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}
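
/* Size of the LINK_XSTATS_TYPE_BRIDGE nest: one bridge_vlan_xstats entry
 * per VLAN (placeholder entries included), the multicast statistics and,
 * for a bridge port, its STP counters.
 */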
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
	       (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) +
	       nla_total_size(0);
}
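
/* Fill the LINK_XSTATS_TYPE_BRIDGE nest with per-VLAN traffic counters,
 * multicast statistics and, for a bridge port, STP counters. *prividx
 * lets an interrupted dump resume from the entry that did not fit.
 */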
static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct pcpu_sw_netstats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
			vxi.rx_packets = u64_stats_read(&stats.rx_packets);
			vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
			vxi.tx_packets = u64_stats_read(&stats.tx_packets);

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif

	if (p) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_STP,
					sizeof(p->stp_xstats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;

		spin_lock_bh(&br->lock);
		memcpy(nla_data(nla), &p->stp_xstats, sizeof(p->stp_xstats));
		spin_unlock_bh(&br->lock);
	}

	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}
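
/* AF_BRIDGE address-family ops; only the filtered per-link attribute
 * size callback is provided here.
 */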
static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};
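
/* rtnl_link_ops for the "bridge" link kind, covering both the bridge
 * device itself (IFLA_BR_*) and its slave ports (IFLA_BRPORT_*).
 */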
struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};
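
/* Register the bridge's rtnetlink pieces: MDB handlers, VLAN rtnetlink
 * handlers, the AF_BRIDGE address-family ops and the "bridge" link ops.
 * On failure the address-family ops and MDB handlers are unwound again.
 */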
int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	br_vlan_rtnl_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}
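
/* Undo everything registered in br_netlink_init(). */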
void br_netlink_fini(void)
{
	br_mdb_uninit();
	br_vlan_rtnl_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}