// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <[email protected]>
 * Copyright (c) 2016 Andrew Lunn <[email protected]>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/devlink.h>
#include <net/sch_generic.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}
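
/* Look up the struct dsa_lag that this tree associates with a LAG
 * net_device, if any port in the tree offloads it. Returns NULL otherwise.
 */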
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}
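
/* Find an existing tree by index and take a reference on it, or allocate a
 * fresh one if this is the first switch probing into that tree.
 */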
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}
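
/* Return the dsa_link connecting @dp to @link_dp if it is already recorded
 * in the tree's routing table, otherwise allocate and record a new one.
 */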
static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}
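
/* Walk the "link" phandles of a DSA port's device tree node and record one
 * dsa_link per peer port. Returns false if a peer has not probed yet, in
 * which case the routing table (and thus the tree) is not yet complete.
 */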
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
{
	struct device_node *ethernet;
	struct net_device *master;
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
	master = of_find_net_device_by_node(ethernet);
	of_node_put(ethernet);

	return master;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}
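
/* Register a devlink_port for this DSA port and run the driver's optional
 * ->port_setup() hook. On registration failure, the hook is unwound with
 * ->port_teardown().
 */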
static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	struct dsa_switch *ds = dp->ds;
	const unsigned char *id;
	unsigned char len;
	int err;

	memset(dlp, 0, sizeof(*dlp));
	devlink_port_init(dl, dlp);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	return 0;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;

	devlink_port_unregister(dlp);

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_fini(dlp);
}
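
/* Bring up a single port according to its type: register its link (for
 * shared CPU/DSA ports described in DT) and enable it, or create the slave
 * net_device for user ports. Any partial progress is unwound on error.
 */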
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (!dp->setup)
		return;

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
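
/* Sync the driver with the tagging protocol chosen for the tree: switch the
 * hardware over via ->change_tag_protocol() if the tree's tagger differs
 * from the default, then let the tagger and the driver connect to each
 * other.
 */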
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}
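
/* One-time initialization of a member switch: devlink registration, switch
 * notifier, the driver's ->setup(), tagger setup, and optionally a slave
 * MDIO bus for drivers that implement ->phy_read().
 */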
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct device_node *dn;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	if (ds->devlink)
		devlink_unregister(ds->devlink);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;
		bool admin_up = (master->flags & IFF_UP) &&
				!qdisc_tx_is_noop(master);

		err = dsa_master_setup(master, cpu_dp);
		if (err)
			break;

		/* Replay master state event */
		dsa_tree_master_admin_state_change(dst, master, admin_up);
		dsa_tree_master_oper_state_change(dst, master,
						  netif_oper_up(master));
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *master = cpu_dp->master;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the master is
		 * currently up and running.
		 */
		dsa_tree_master_admin_state_change(dst, master, false);

		dsa_master_teardown(master);
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;

	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
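
/* Set up an entire switch tree once its cross-chip routing table is complete
 * (i.e. all member switches have probed): CPU ports, switches, ports,
 * masters and LAG bookkeeping, unwinding everything on any failure.
 */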
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
					 struct net_device *master)
{
	struct dsa_notifier_master_state_info info;
	struct dsa_port *cpu_dp = master->dsa_ptr;

	info.master = master;
	info.operational = dsa_port_master_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}

void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of admin state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (up && cpu_dp->master_oper_up))
		notify = true;

	cpu_dp->master_admin_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	/* Don't keep track of oper state on LAG DSA masters,
	 * but rather just of physical DSA masters
	 */
	if (netif_is_lag_master(master))
		return;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (cpu_dp->master_admin_up && up))
		notify = true;

	cpu_dp->master_oper_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}
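
/* Return the dsa_port with the given index on this switch, allocating and
 * linking a new one into the tree's ports list on first use.
 */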
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
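
/* Mark a port as CPU port and resolve the tagging protocol the tree will
 * use: the driver's preferred protocol, unless @user_protocol (taken from
 * the "dsa-tag-protocol" DT property) overrides it. All CPU ports of a tree
 * must agree on one protocol.
 */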
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
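
/* Classify a port from its device tree node: an "ethernet" phandle makes it
 * a CPU port, a "link" property makes it a DSA link, anything else is a user
 * port named after its optional "label" property.
 */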
static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}
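
/* Read the optional "dsa,member" property (tree index, switch index) and
 * join the corresponding tree, defaulting to switch 0 of tree 0 when it is
 * absent.
 */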
static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}
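
/* Entry point for switch drivers to register with the DSA core. All
 * registration and teardown paths are serialized by dsa2_mutex.
 */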
int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dsa_port_to_master(dp);
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);