br_vlan.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
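
/* Publish @v as the group's pvid. The smp_wmb() below appears to pair with
 * the smp_rmb() on the read side (br_get_pvid()), so that a reader which
 * observes the new pvid also observes the writes that preceded it.
 */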
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
 * If @commit is false, only return whether the BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change on @v.
 */
static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
				bool commit)
{
	struct net_bridge_vlan_group *vg;
	bool change;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	/* check if anything would be changed on commit */
	change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
		 ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);

	if (!commit)
		goto out;

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

out:
	return change;
}

static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
{
	return __vlan_flags_update(v, flags, false);
}

static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
{
	__vlan_flags_update(v, flags, true);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try the switchdev op first. If it is not supported, fall back to
	 * the 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, false, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}
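
/* Insert @v into its vlan group's list, keeping the list sorted by VID in
 * ascending order. The list is walked backwards on the assumption that new
 * entries usually carry a higher VID than the existing ones.
 */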
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid >= vent->vid)
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try the switchdev op first. If it is not supported, fall back to
	 * the 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}
/* Return the master vlan; if it doesn't exist, create it. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		br_multicast_toggle_one_vlan(masterv, false);
		br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_init_state(struct net_bridge_vlan *v)
{
	struct net_bridge *br;

	if (br_vlan_is_master(v))
		br = v->br;
	else
		br = v->port->br;

	if (br_opt_get(br, BROPT_MST_ENABLED)) {
		br_mst_vlan_init_state(v);
		return;
	}

	v->state = BR_STATE_FORWARDING;
	v->msti = 0;
}
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats =
			     netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
		br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
	} else {
		if (br_vlan_should_use(v)) {
			err = br_switchdev_port_vlan_add(dev, v->vid, flags,
							 false, extack);
			if (err && err != -EOPNOTSUPP)
				goto out;
		}
		br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
		v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	br_vlan_init_state(v);

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_flags_commit(v, flags);
	br_multicast_toggle_one_vlan(v, true);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}
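
/* Undo __vlan_add(): drop the pvid if @v holds it, remove the VID from the
 * port's device filter (or the switchdev entry for bridge vlans), and for
 * port vlans unlink the entry and release the reference on the master vlan.
 * Freeing is deferred to an RCU callback since lookups run under RCU.
 */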
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		br_multicast_toggle_one_vlan(v, false);
		br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}
static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;
	int err;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		err = __vlan_del(vlan);
		if (err) {
			br_err(br,
			       "port %u(%s) failed to delete vlan %d: %pe\n",
			       (unsigned int) p->port_no, p->dev->name,
			       vlan->vid, ERR_PTR(err));
		}
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* A vlan entry must be configured at this point. The only
	 * exception is when the bridge is set in promiscuous mode and the
	 * packet is destined for the bridge device. In that case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		u64_stats_add(&stats->tx_bytes, skb->len);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_update_end(&stats->syncp);
	}

	/* If the skb will be sent using forwarding offload, the assumption is
	 * that the switchdev will inject the packet into hardware together
	 * with the bridge VLAN, so that it can be forwarded according to that
	 * VLAN. The switchdev should deal with popping the VLAN header in
	 * hardware on each egress port as appropriate. So only strip the VLAN
	 * header if forwarding offload is not being used.
	 */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
	    !br_switchdev_frame_uses_tx_fwd_offload(skb))
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
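
/* Classify the frame to a vlan on ingress. Untagged and priority-tagged
 * (VID 0) frames are assigned to the port's pvid, if one is configured;
 * otherwise they are dropped. Tagged frames whose protocol differs from
 * the bridge's vlan_proto are re-tagged and treated as untagged for
 * classification.
 */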
/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state,
			      struct net_bridge_vlan **vlan)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if snooping and stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
		    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				if (!br_vlan_state_allowed(*state, true))
					goto drop;
			}
			return true;
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		u64_stats_add(&stats->rx_bytes, skb->len);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_update_end(&stats->syncp);
	}

	*vlan = v;

	return true;

drop:
	kfree_skb(skb);
	return false;
}
bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state,
			struct net_bridge_vlan **vlan)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	*vlan = NULL;
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state, vlan);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	bool would_change = __vlan_flags_would_change(vlan, flags);
	bool becomes_brentry = false;
	int err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
			return -EINVAL;

		becomes_brentry = true;
	}

	/* Master VLANs that aren't brentries weren't notified before,
	 * time to notify them now.
	 */
	if (becomes_brentry || would_change) {
		err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
						 would_change, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	if (becomes_brentry) {
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
		br_multicast_toggle_one_vlan(vlan, true);
	}

	__vlan_flags_commit(vlan, flags);
	if (would_change)
		*changed = true;

	return 0;

err_fdb_insert:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}
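
/* An 802.1Q bridge uses the Bridge Group Address 01:80:C2:00:00:00, while a
 * vlan-filtering 802.1AD (provider) bridge uses the Provider Bridge Group
 * Address 01:80:C2:00:00:08; recalculate_group_addr() above flips the last
 * octet accordingly. A provider bridge passes most customer link-local
 * frames through transparently, so the mask below requires forwarding of
 * the 01:80:C2:00:00:0X range except the bridge's own group address.
 */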
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
			  struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP) {
		br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
		return err;
	}

	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
	if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
		br_multicast_toggle_vlan_snooping(br, false, NULL);
	}

	return 0;
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
			struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, oldproto, vlan->vid);
		}
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr, NULL);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
		if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
			continue;
		vlan_vid_del(p->dev, proto, vlan->vid);
	}

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, proto, vlan->vid);
		}
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
		      struct netlink_ext_ack *extack)
{
	if (!eth_type_vlan(htons(val)))
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val), extack);
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow changing the option only if no port vlans are configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}
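
/* Remove the default pvid vlan from the bridge and from every port, but
 * only where it still looks like the default: untagged and still the pvid.
 * Entries the user has modified are left alone.
 */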
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		__set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		__set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	goto out;
}
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
			     struct netlink_ext_ack *extack)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, extack);
out:
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr, extack);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		bool would_change = __vlan_flags_would_change(vlan, flags);

		if (would_change) {
			/* Pass the flags to the hardware bridge */
			ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
							 true, extack);
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}

		__vlan_flags_commit(vlan, flags);
		*changed = would_change;

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
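
/* Sum the per-cpu counters into @stats. Each per-cpu copy is read under
 * its u64_stats seqcount and retried on concurrent writer updates, which
 * keeps the 64-bit counters tear-free on 32-bit hosts.
 */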
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = u64_stats_read(&cpu_stats->rx_packets);
			rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
			txbytes = u64_stats_read(&cpu_stats->tx_bytes);
			txpackets = u64_stats_read(&cpu_stats->tx_packets);
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		u64_stats_add(&stats->rx_packets, rxpackets);
		u64_stats_add(&stats->rx_bytes, rxbytes);
		u64_stats_add(&stats->tx_bytes, txbytes);
		u64_stats_add(&stats->tx_packets, txpackets);
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
				    struct net_device_path_ctx *ctx,
				    struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	int idx = ctx->num_vlans - 1;
	u16 vid;

	path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return;

	vg = br_vlan_group(br);

	if (idx >= 0 &&
	    ctx->vlan[idx].proto == br->vlan_proto) {
		vid = ctx->vlan[idx].id;
	} else {
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
		vid = br_get_pvid(vg);
	}

	path->bridge.vlan_id = vid;
	path->bridge.vlan_proto = br->vlan_proto;
}
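
/* Refine path->bridge.vlan_mode for the egress port: if the vlan egresses
 * untagged, a pending TAG becomes KEEP (the tag would only be pushed to be
 * popped again); otherwise the tag must be removed, either in hardware
 * (UNTAG_HW for switchdev-offloaded vlans) or in software (UNTAG).
 */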
int br_vlan_fill_forward_path_mode(struct net_bridge *br,
				   struct net_bridge_port *dst,
				   struct net_device_path *path)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return 0;

	vg = nbp_vlan_group_rcu(dst);
	v = br_vlan_find(vg, path->bridge.vlan_id);
	if (!v || !br_vlan_should_use(v))
		return -EINVAL;

	if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return 0;

	if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
	else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
	else
		path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;

	return 0;
}

int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);
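
/* A minimal usage sketch for br_vlan_get_info(), assuming a hypothetical
 * caller that holds rtnl and has a bridge or bridge port device:
 *
 *	struct bridge_vlan_info vinfo;
 *
 *	if (!br_vlan_get_info(brport_dev, vid, &vinfo) &&
 *	    (vinfo.flags & BRIDGE_VLAN_INFO_PVID))
 *		handle_pvid(brport_dev, vid);
 *
 * brport_dev, vid and handle_pvid() are illustrative only, not part of
 * this file or of the exported API.
 */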
int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
			 struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);
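
/* Bridge binding: a vlan device created on top of the bridge with the
 * VLAN_FLAG_BRIDGE_BINDING flag tracks the per-vlan state of the bridge's
 * ports, i.e. its carrier is on only while the bridge has carrier and at
 * least one port carrying that VID is up. The helpers below locate such
 * upper devices and keep their link state in sync.
 */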
static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}

static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
			       __always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}

static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
	int found;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
					      NULL);
	rcu_read_unlock();

	return !!found;
}

struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};

static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
					  struct netdev_nested_priv *priv)
{
	struct br_vlan_bind_walk_data *data = priv->data;
	int found = 0;

	if (br_vlan_is_bind_vlan_dev(dev) &&
	    vlan_dev_priv(dev)->vlan_id == data->vid) {
		data->result = dev;
		found = 1;
	}

	return found;
}

static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
	struct br_vlan_bind_walk_data data = {
		.vid = vid,
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
				      &priv);
	rcu_read_unlock();

	return data.result;
}
static bool br_vlan_is_dev_up(const struct net_device *dev)
{
	return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}

static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
				       struct net_device *vlan_dev)
{
	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	bool has_carrier = false;

	if (!netif_carrier_ok(br->dev)) {
		netif_carrier_off(vlan_dev);
		return;
	}

	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
			has_carrier = true;
			break;
		}
	}

	if (has_carrier)
		netif_carrier_on(vlan_dev);
	else
		netif_carrier_off(vlan_dev);
}

static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}
  1402. static void br_vlan_upper_change(struct net_device *dev,
  1403. struct net_device *upper_dev,
  1404. bool linking)
  1405. {
  1406. struct net_bridge *br = netdev_priv(dev);
  1407. if (!br_vlan_is_bind_vlan_dev(upper_dev))
  1408. return;
  1409. if (linking) {
  1410. br_vlan_set_vlan_dev_state(br, upper_dev);
  1411. br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
  1412. } else {
  1413. br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
  1414. br_vlan_has_upper_bind_vlan_dev(dev));
  1415. }
  1416. }
  1417. struct br_vlan_link_state_walk_data {
  1418. struct net_bridge *br;
  1419. };
  1420. static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
  1421. struct netdev_nested_priv *priv)
  1422. {
  1423. struct br_vlan_link_state_walk_data *data = priv->data;
  1424. if (br_vlan_is_bind_vlan_dev(vlan_dev))
  1425. br_vlan_set_vlan_dev_state(data->br, vlan_dev);
  1426. return 0;
  1427. }
static void br_vlan_link_state_change(struct net_device *dev,
				      struct net_bridge *br)
{
	struct br_vlan_link_state_walk_data data = {
		.br = br
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
				      &priv);
	rcu_read_unlock();
}

/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_device *vlan_dev;

	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
	if (vlan_dev)
		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}
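
/* Bridge netdev event handler: installs/removes the default pvid on
 * register/unregister, tracks vlan uppers on CHANGEUPPER and propagates
 * link state changes to bound vlan devices.
 */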
/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_bridge *br = netdev_priv(dev);
	int vlcmd = 0, ret = 0;
	bool changed = false;

	switch (event) {
	case NETDEV_REGISTER:
		ret = br_vlan_add(br, br->default_pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
		vlcmd = RTM_NEWVLAN;
		break;
	case NETDEV_UNREGISTER:
		changed = !br_vlan_delete(br, br->default_pvid);
		vlcmd = RTM_DELVLAN;
		break;
	case NETDEV_CHANGEUPPER:
		info = ptr;
		br_vlan_upper_change(dev, info->upper_dev, info->linking);
		break;
	case NETDEV_CHANGE:
	case NETDEV_UP:
		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
			break;
		br_vlan_link_state_change(dev, br);
		break;
	}

	if (changed)
		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);

	return ret;
}

/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_DOWN:
	case NETDEV_UP:
		br_vlan_set_all_vlan_dev_state(p);
		break;
	}
}
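
/* Dump per-vlan rx/tx counters into a BRIDGE_VLANDB_ENTRY_STATS nest;
 * returns false and cancels the nest if the skb runs out of room.
 */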
static bool br_vlan_stats_fill(struct sk_buff *skb,
			       const struct net_bridge_vlan *v)
{
	struct pcpu_sw_netstats stats;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
	if (!nest)
		return false;

	br_vlan_get_stats(v, &stats);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
			      u64_stats_read(&stats.rx_bytes),
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
			      u64_stats_read(&stats.rx_packets),
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
			      u64_stats_read(&stats.tx_bytes),
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
			      u64_stats_read(&stats.tx_packets),
			      BRIDGE_VLANDB_STATS_PAD))
		goto out_err;

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts,
			      u16 flags,
			      bool dump_stats)
{
	struct bridge_vlan_info info;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
	if (!nest)
		return false;

	memset(&info, 0, sizeof(info));
	info.vid = vid;
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	if (flags & BRIDGE_VLAN_INFO_PVID)
		info.flags |= BRIDGE_VLAN_INFO_PVID;

	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
		goto out_err;

	if (v_opts) {
		if (!br_vlan_opts_fill(skb, v_opts))
			goto out_err;

		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
			goto out_err;
	}

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}
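
/* Worst-case netlink message size for a single-vlan notification. */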
static size_t rtnl_vlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
		+ br_vlan_opts_nl_size(); /* bridge vlan options */
}
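
/* Send an RTM_NEWVLAN/RTM_DELVLAN notification for the [vid, vid_range]
 * range on the bridge (@p == NULL) or on port @p to the RTNLGRP_BRVLAN
 * multicast group.
 */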
void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need to find the vlan due to flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}

/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
			     const struct net_bridge_vlan *range_end)
{
	return v_curr->vid - range_end->vid == 1 &&
	       range_end->flags == v_curr->flags &&
	       br_vlan_opts_eq_range(v_curr, range_end);
}
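
/* Dump the vlan database of one bridge or port device, compressing
 * consecutive vlans with identical flags/options into ranges. Returns
 * -EMSGSIZE when the skb fills up so the dump can resume from cb->args[1].
 */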
static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
			    struct netlink_callback *cb,
			    u32 dump_flags)
{
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
	bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
		/* global options are dumped only for bridge devices */
		if (dump_global)
			return 0;

		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

	/* idx must stay at range's beginning until it is filled in */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!dump_global && !br_vlan_should_use(v))
			continue;
		if (idx < s_idx) {
			idx++;
			continue;
		}

		if (!range_start) {
			range_start = v;
			range_end = v;
			continue;
		}

		if (dump_global) {
			if (br_vlan_global_opts_can_enter_range(v, range_end))
				goto update_end;
			if (!br_vlan_global_opts_fill(skb, range_start->vid,
						      range_end->vid,
						      range_start)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		} else if (dump_stats || v->vid == pvid ||
			   !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
update_end:
		range_end = v;
	}

	/* err will be 0 and range_start will be set in 3 cases here:
	 * - first vlan (range_start == range_end)
	 * - last vlan (range_start == range_end, not in range)
	 * - last vlan range (range_start != range_end, in range)
	 */
	if (!err && range_start) {
		if (dump_global &&
		    !br_vlan_global_opts_fill(skb, range_start->vid,
					      range_end->vid, range_start))
			err = -EMSGSIZE;
		else if (!dump_global &&
			 !br_vlan_fill_vids(skb, range_start->vid,
					    range_end->vid, range_start,
					    br_vlan_flags(range_start, pvid),
					    dump_stats))
			err = -EMSGSIZE;
	}

	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}

static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};
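
/* RTM_GETVLAN dump handler: dump a single device if bvm->ifindex is set,
 * otherwise iterate over all devices in the netns.
 */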
static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		/* if the dump completed without an error we return 0 here */
		if (err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
			if (err == -EMSGSIZE)
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}

static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO] =
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
	[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
};
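
/* Process a single BRIDGE_VLANDB_ENTRY attribute: validate the vlan id
 * (and optional range), map RTM_NEWVLAN/RTM_DELVLAN onto the legacy
 * RTM_SETLINK/RTM_DELLINK vlan processing, then apply per-vlan options.
 */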
static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
	bool changed = false, skip_processing = false;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
	memset(&vrange_end, 0, sizeof(vrange_end));
	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* validate user-provided flags without RANGE_BEGIN */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		/* vinfo_last is the range start, vinfo the range end */
		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
		break;
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
	}

	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite vinfo_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first if anything changed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}

	return err;
}
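
/* RTM_NEWVLAN/RTM_DELVLAN doit handler: resolve the target device and
 * process every vlan entry / global options attribute in the message.
 */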
static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	struct nlattr *attr;
	int err, vlans = 0;
	int rem;

	/* this should validate the header and check for remaining bytes */
	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
			  extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(nlh);
	dev = __dev_get_by_index(net, bvm->ifindex);
	if (!dev)
		return -ENODEV;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
		return -EINVAL;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
		switch (nla_type(attr)) {
		case BRIDGE_VLANDB_ENTRY:
			err = br_vlan_rtm_process_one(dev, attr,
						      nlh->nlmsg_type,
						      extack);
			break;
		case BRIDGE_VLANDB_GLOBAL_OPTIONS:
			err = br_vlan_rtm_process_global_options(dev, attr,
								 nlh->nlmsg_type,
								 extack);
			break;
		default:
			continue;
		}

		vlans++;
		if (err)
			break;
	}

	if (!vlans) {
		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
		err = -EINVAL;
	}

	return err;
}
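
/* Register/unregister the PF_BRIDGE RTM_GETVLAN/RTM_NEWVLAN/RTM_DELVLAN
 * rtnetlink handlers (the vlan db API consumed by, e.g., newer iproute2
 * "bridge vlan" commands).
 */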
void br_vlan_rtnl_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
			     br_vlan_rtm_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
			     br_vlan_rtm_process, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
			     br_vlan_rtm_process, NULL, 0);
}

void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
}