switch.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Handling of a single switch chip, part of a switch fabric
  4. *
  5. * Copyright (c) 2017 Savoir-faire Linux Inc.
  6. * Vivien Didelot <[email protected]>
  7. */
  8. #include <linux/if_bridge.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/notifier.h>
  11. #include <linux/if_vlan.h>
  12. #include <net/switchdev.h>
  13. #include "dsa_priv.h"
  14. static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
  15. unsigned int ageing_time)
  16. {
  17. struct dsa_port *dp;
  18. dsa_switch_for_each_port(dp, ds)
  19. if (dp->ageing_time && dp->ageing_time < ageing_time)
  20. ageing_time = dp->ageing_time;
  21. return ageing_time;
  22. }
  23. static int dsa_switch_ageing_time(struct dsa_switch *ds,
  24. struct dsa_notifier_ageing_time_info *info)
  25. {
  26. unsigned int ageing_time = info->ageing_time;
  27. if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
  28. return -ERANGE;
  29. if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
  30. return -ERANGE;
  31. /* Program the fastest ageing time in case of multiple bridges */
  32. ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
  33. if (ds->ops->set_ageing_time)
  34. return ds->ops->set_ageing_time(ds, ageing_time);
  35. return 0;
  36. }
  37. static bool dsa_port_mtu_match(struct dsa_port *dp,
  38. struct dsa_notifier_mtu_info *info)
  39. {
  40. return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
  41. }
  42. static int dsa_switch_mtu(struct dsa_switch *ds,
  43. struct dsa_notifier_mtu_info *info)
  44. {
  45. struct dsa_port *dp;
  46. int ret;
  47. if (!ds->ops->port_change_mtu)
  48. return -EOPNOTSUPP;
  49. dsa_switch_for_each_port(dp, ds) {
  50. if (dsa_port_mtu_match(dp, info)) {
  51. ret = ds->ops->port_change_mtu(ds, dp->index,
  52. info->mtu);
  53. if (ret)
  54. return ret;
  55. }
  56. }
  57. return 0;
  58. }
  59. static int dsa_switch_bridge_join(struct dsa_switch *ds,
  60. struct dsa_notifier_bridge_info *info)
  61. {
  62. int err;
  63. if (info->dp->ds == ds) {
  64. if (!ds->ops->port_bridge_join)
  65. return -EOPNOTSUPP;
  66. err = ds->ops->port_bridge_join(ds, info->dp->index,
  67. info->bridge,
  68. &info->tx_fwd_offload,
  69. info->extack);
  70. if (err)
  71. return err;
  72. }
  73. if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
  74. err = ds->ops->crosschip_bridge_join(ds,
  75. info->dp->ds->dst->index,
  76. info->dp->ds->index,
  77. info->dp->index,
  78. info->bridge,
  79. info->extack);
  80. if (err)
  81. return err;
  82. }
  83. return 0;
  84. }
  85. static int dsa_switch_bridge_leave(struct dsa_switch *ds,
  86. struct dsa_notifier_bridge_info *info)
  87. {
  88. if (info->dp->ds == ds && ds->ops->port_bridge_leave)
  89. ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
  90. if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
  91. ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
  92. info->dp->ds->index,
  93. info->dp->index,
  94. info->bridge);
  95. return 0;
  96. }
  97. /* Matches for all upstream-facing ports (the CPU port and all upstream-facing
  98. * DSA links) that sit between the targeted port on which the notifier was
  99. * emitted and its dedicated CPU port.
  100. */
  101. static bool dsa_port_host_address_match(struct dsa_port *dp,
  102. const struct dsa_port *targeted_dp)
  103. {
  104. struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
  105. if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
  106. return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
  107. cpu_dp->index);
  108. return false;
  109. }
  110. static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
  111. const unsigned char *addr, u16 vid,
  112. struct dsa_db db)
  113. {
  114. struct dsa_mac_addr *a;
  115. list_for_each_entry(a, addr_list, list)
  116. if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
  117. dsa_db_equal(&a->db, &db))
  118. return a;
  119. return NULL;
  120. }
/* Install an MDB entry on @dp. Shared (CPU/DSA) ports reference-count the
 * entry in dp->mdbs so several users can request the same address; the
 * hardware op is issued only on the 0 -> 1 transition. Returns 0 on
 * success or a negative errno from the driver / -ENOMEM.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		/* Already programmed in hardware: just add a reference */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first; only track the entry if that succeeded */
	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Drop one reference to an MDB entry on @dp; the hardware op runs only on
 * the 1 -> 0 transition. On hardware failure the refcount is restored to 1
 * so a later deletion can retry. Returns 0, -ENOENT if the entry was never
 * added, or a negative errno from the driver.
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry: keep it in hardware */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		/* Deletion failed: resurrect the entry for a future retry */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Install an FDB entry on @dp. Shared (CPU/DSA) ports reference-count the
 * entry in dp->fdbs; the hardware op is issued only on the 0 -> 1
 * transition. Returns 0 on success or a negative errno.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		/* Already programmed in hardware: just add a reference */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first; only track the entry if that succeeded */
	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Drop one reference to an FDB entry on @dp; the hardware op runs only on
 * the 1 -> 0 transition. On hardware failure the refcount is restored to 1
 * so a later deletion can retry. Returns 0, -ENOENT if the entry was never
 * added, or a negative errno from the driver.
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry: keep it in hardware */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		/* Deletion failed: resurrect the entry for a future retry */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
/* Reference-counted FDB install on a LAG: the hardware op (lag_fdb_add)
 * runs only on the 0 -> 1 transition of the per-{addr, vid, db} refcount
 * kept in lag->fdbs, under lag->fdb_lock. Returns 0 or a negative errno.
 * NOTE(review): callers are expected to have checked ds->ops->lag_fdb_add;
 * it is dereferenced unconditionally here — confirm against call sites.
 */
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		/* Already programmed in hardware: just add a reference */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first; only track the entry if that succeeded */
	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
/* Drop one reference to a LAG FDB entry; the hardware op (lag_fdb_del)
 * runs only on the 1 -> 0 transition. On hardware failure the refcount is
 * restored to 1 so a later deletion can retry. Returns 0, -ENOENT if the
 * entry was never added, or a negative errno from the driver.
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry: keep it in hardware */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		/* Deletion failed: resurrect the entry for a future retry */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
/* Install info->addr as a host FDB entry on every port of @ds that sits on
 * the path between the targeted port and its CPU port. When the CPU port
 * is under a LAG, the entry is programmed on the LAG instead.
 * NOTE(review): only ds->ops->port_fdb_add is checked here, while the LAG
 * branch ends up calling ds->ops->lag_fdb_add — presumably guaranteed
 * non-NULL when cpu_port_in_lag is set; confirm against the LAG join path.
 */
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}
/* Remove info->addr as a host FDB entry from every port of @ds on the path
 * between the targeted port and its CPU port (or from the LAG when the CPU
 * port is under one). Stops at the first error and returns it; 0 otherwise.
 */
static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			if (err)
				break;
		}
	}

	return err;
}
  355. static int dsa_switch_fdb_add(struct dsa_switch *ds,
  356. struct dsa_notifier_fdb_info *info)
  357. {
  358. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  359. struct dsa_port *dp = dsa_to_port(ds, port);
  360. if (!ds->ops->port_fdb_add)
  361. return -EOPNOTSUPP;
  362. return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
  363. }
  364. static int dsa_switch_fdb_del(struct dsa_switch *ds,
  365. struct dsa_notifier_fdb_info *info)
  366. {
  367. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  368. struct dsa_port *dp = dsa_to_port(ds, port);
  369. if (!ds->ops->port_fdb_del)
  370. return -EOPNOTSUPP;
  371. return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
  372. }
  373. static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
  374. struct dsa_notifier_lag_fdb_info *info)
  375. {
  376. struct dsa_port *dp;
  377. if (!ds->ops->lag_fdb_add)
  378. return -EOPNOTSUPP;
  379. /* Notify switch only if it has a port in this LAG */
  380. dsa_switch_for_each_port(dp, ds)
  381. if (dsa_port_offloads_lag(dp, info->lag))
  382. return dsa_switch_do_lag_fdb_add(ds, info->lag,
  383. info->addr, info->vid,
  384. info->db);
  385. return 0;
  386. }
  387. static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
  388. struct dsa_notifier_lag_fdb_info *info)
  389. {
  390. struct dsa_port *dp;
  391. if (!ds->ops->lag_fdb_del)
  392. return -EOPNOTSUPP;
  393. /* Notify switch only if it has a port in this LAG */
  394. dsa_switch_for_each_port(dp, ds)
  395. if (dsa_port_offloads_lag(dp, info->lag))
  396. return dsa_switch_do_lag_fdb_del(ds, info->lag,
  397. info->addr, info->vid,
  398. info->db);
  399. return 0;
  400. }
  401. static int dsa_switch_lag_change(struct dsa_switch *ds,
  402. struct dsa_notifier_lag_info *info)
  403. {
  404. if (info->dp->ds == ds && ds->ops->port_lag_change)
  405. return ds->ops->port_lag_change(ds, info->dp->index);
  406. if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
  407. return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
  408. info->dp->index);
  409. return 0;
  410. }
  411. static int dsa_switch_lag_join(struct dsa_switch *ds,
  412. struct dsa_notifier_lag_info *info)
  413. {
  414. if (info->dp->ds == ds && ds->ops->port_lag_join)
  415. return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
  416. info->info, info->extack);
  417. if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
  418. return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
  419. info->dp->index, info->lag,
  420. info->info, info->extack);
  421. return -EOPNOTSUPP;
  422. }
  423. static int dsa_switch_lag_leave(struct dsa_switch *ds,
  424. struct dsa_notifier_lag_info *info)
  425. {
  426. if (info->dp->ds == ds && ds->ops->port_lag_leave)
  427. return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
  428. if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
  429. return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
  430. info->dp->index, info->lag);
  431. return -EOPNOTSUPP;
  432. }
  433. static int dsa_switch_mdb_add(struct dsa_switch *ds,
  434. struct dsa_notifier_mdb_info *info)
  435. {
  436. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  437. struct dsa_port *dp = dsa_to_port(ds, port);
  438. if (!ds->ops->port_mdb_add)
  439. return -EOPNOTSUPP;
  440. return dsa_port_do_mdb_add(dp, info->mdb, info->db);
  441. }
  442. static int dsa_switch_mdb_del(struct dsa_switch *ds,
  443. struct dsa_notifier_mdb_info *info)
  444. {
  445. int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
  446. struct dsa_port *dp = dsa_to_port(ds, port);
  447. if (!ds->ops->port_mdb_del)
  448. return -EOPNOTSUPP;
  449. return dsa_port_do_mdb_del(dp, info->mdb, info->db);
  450. }
  451. static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
  452. struct dsa_notifier_mdb_info *info)
  453. {
  454. struct dsa_port *dp;
  455. int err = 0;
  456. if (!ds->ops->port_mdb_add)
  457. return -EOPNOTSUPP;
  458. dsa_switch_for_each_port(dp, ds) {
  459. if (dsa_port_host_address_match(dp, info->dp)) {
  460. err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
  461. if (err)
  462. break;
  463. }
  464. }
  465. return err;
  466. }
  467. static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
  468. struct dsa_notifier_mdb_info *info)
  469. {
  470. struct dsa_port *dp;
  471. int err = 0;
  472. if (!ds->ops->port_mdb_del)
  473. return -EOPNOTSUPP;
  474. dsa_switch_for_each_port(dp, ds) {
  475. if (dsa_port_host_address_match(dp, info->dp)) {
  476. err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
  477. if (err)
  478. break;
  479. }
  480. }
  481. return err;
  482. }
  483. /* Port VLANs match on the targeted port and on all DSA ports */
  484. static bool dsa_port_vlan_match(struct dsa_port *dp,
  485. struct dsa_notifier_vlan_info *info)
  486. {
  487. return dsa_port_is_dsa(dp) || dp == info->dp;
  488. }
  489. /* Host VLANs match on the targeted port's CPU port, and on all DSA ports
  490. * (upstream and downstream) of that switch and its upstream switches.
  491. */
  492. static bool dsa_port_host_vlan_match(struct dsa_port *dp,
  493. const struct dsa_port *targeted_dp)
  494. {
  495. struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
  496. if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
  497. return dsa_port_is_dsa(dp) || dp == cpu_dp;
  498. return false;
  499. }
  500. static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
  501. const struct switchdev_obj_port_vlan *vlan)
  502. {
  503. struct dsa_vlan *v;
  504. list_for_each_entry(v, vlan_list, list)
  505. if (v->vid == vlan->vid)
  506. return v;
  507. return NULL;
  508. }
/* Install a VLAN on @dp. Shared (CPU/DSA) ports reference-count the VLAN
 * in dp->vlans; the hardware op runs only on the 0 -> 1 transition.
 * Returns 0 on success or a negative errno.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		/* Already programmed in hardware: just add a reference */
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first; only track the VLAN if that succeeded */
	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
/* Drop one reference to a VLAN on @dp; the hardware op runs only on the
 * 1 -> 0 transition. On hardware failure the refcount is restored to 1 so
 * a later deletion can retry. Returns 0, -ENOENT if the VLAN was never
 * added, or a negative errno from the driver.
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this VLAN: keep it in hardware */
	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		/* Deletion failed: resurrect the entry for a future retry */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
  579. static int dsa_switch_vlan_add(struct dsa_switch *ds,
  580. struct dsa_notifier_vlan_info *info)
  581. {
  582. struct dsa_port *dp;
  583. int err;
  584. if (!ds->ops->port_vlan_add)
  585. return -EOPNOTSUPP;
  586. dsa_switch_for_each_port(dp, ds) {
  587. if (dsa_port_vlan_match(dp, info)) {
  588. err = dsa_port_do_vlan_add(dp, info->vlan,
  589. info->extack);
  590. if (err)
  591. return err;
  592. }
  593. }
  594. return 0;
  595. }
  596. static int dsa_switch_vlan_del(struct dsa_switch *ds,
  597. struct dsa_notifier_vlan_info *info)
  598. {
  599. struct dsa_port *dp;
  600. int err;
  601. if (!ds->ops->port_vlan_del)
  602. return -EOPNOTSUPP;
  603. dsa_switch_for_each_port(dp, ds) {
  604. if (dsa_port_vlan_match(dp, info)) {
  605. err = dsa_port_do_vlan_del(dp, info->vlan);
  606. if (err)
  607. return err;
  608. }
  609. }
  610. return 0;
  611. }
  612. static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
  613. struct dsa_notifier_vlan_info *info)
  614. {
  615. struct dsa_port *dp;
  616. int err;
  617. if (!ds->ops->port_vlan_add)
  618. return -EOPNOTSUPP;
  619. dsa_switch_for_each_port(dp, ds) {
  620. if (dsa_port_host_vlan_match(dp, info->dp)) {
  621. err = dsa_port_do_vlan_add(dp, info->vlan,
  622. info->extack);
  623. if (err)
  624. return err;
  625. }
  626. }
  627. return 0;
  628. }
  629. static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
  630. struct dsa_notifier_vlan_info *info)
  631. {
  632. struct dsa_port *dp;
  633. int err;
  634. if (!ds->ops->port_vlan_del)
  635. return -EOPNOTSUPP;
  636. dsa_switch_for_each_port(dp, ds) {
  637. if (dsa_port_host_vlan_match(dp, info->dp)) {
  638. err = dsa_port_do_vlan_del(dp, info->vlan);
  639. if (err)
  640. return err;
  641. }
  642. }
  643. return 0;
  644. }
/* Switch @ds over to a new tagging protocol. The fallible driver call is
 * made first; only after it succeeds are the per-port cached tag_ops and
 * the tagger-dependent user-port state (tagger setup, MTU) updated, so a
 * failure leaves everything consistent. Runs under rtnl (asserted below).
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Per the comment above, a missing switch-side op is not a hard
	 * error and the tagger side is intentionally left connected.
	 */
	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
  704. static int
  705. dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
  706. struct dsa_notifier_tag_proto_info *info)
  707. {
  708. const struct dsa_device_ops *tag_ops = info->tag_ops;
  709. /* Notify the tagger about the disconnection from this switch */
  710. if (tag_ops->disconnect && ds->tagger_data)
  711. tag_ops->disconnect(ds);
  712. /* No need to notify the switch, since it shouldn't have any
  713. * resources to tear down
  714. */
  715. return 0;
  716. }
  717. static int
  718. dsa_switch_master_state_change(struct dsa_switch *ds,
  719. struct dsa_notifier_master_state_info *info)
  720. {
  721. if (!ds->ops->master_state_change)
  722. return 0;
  723. ds->ops->master_state_change(ds, info->master, info->operational);
  724. return 0;
  725. }
/* Notifier callback: dispatch a cross-chip DSA event to its handler.
 * @info is cast by each handler to the event-specific notifier info
 * struct. A non-zero handler result stops further chain traversal
 * (converted via notifier_from_errno()).
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
/* Subscribe @ds to the cross-chip notifier chain of its switch tree. */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
/* Unsubscribe @ds from its tree's notifier chain. Failure is only logged:
 * the caller is tearing down and has no recovery path anyway.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}