/* NOTE(review): removed non-source residue left over from page extraction
 * (a "dsa.h 43 KB" banner and runs of concatenated line numbers).
 */
  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. /*
  3. * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
  4. * Copyright (c) 2008-2009 Marvell Semiconductor
  5. */
  6. #ifndef __LINUX_NET_DSA_H
  7. #define __LINUX_NET_DSA_H
  8. #include <linux/if.h>
  9. #include <linux/if_ether.h>
  10. #include <linux/list.h>
  11. #include <linux/notifier.h>
  12. #include <linux/timer.h>
  13. #include <linux/workqueue.h>
  14. #include <linux/of.h>
  15. #include <linux/ethtool.h>
  16. #include <linux/net_tstamp.h>
  17. #include <linux/phy.h>
  18. #include <linux/platform_data/dsa.h>
  19. #include <linux/phylink.h>
  20. #include <net/devlink.h>
  21. #include <net/switchdev.h>
  22. struct tc_action;
  23. struct phy_device;
  24. struct fixed_phy_status;
  25. struct phylink_link_state;
/* Tagging protocol identifiers.
 *
 * The numeric _VALUE macros exist separately from the enum so that
 * MODULE_ALIAS_DSA_TAG_DRIVER() below can stringify them at preprocessing
 * time; module autoloading matches on these strings, so the values are
 * ABI and must never be renumbered.
 */
#define DSA_TAG_PROTO_NONE_VALUE		0
#define DSA_TAG_PROTO_BRCM_VALUE		1
#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE	2
#define DSA_TAG_PROTO_DSA_VALUE			3
#define DSA_TAG_PROTO_EDSA_VALUE		4
#define DSA_TAG_PROTO_GSWIP_VALUE		5
#define DSA_TAG_PROTO_KSZ9477_VALUE		6
#define DSA_TAG_PROTO_KSZ9893_VALUE		7
#define DSA_TAG_PROTO_LAN9303_VALUE		8
#define DSA_TAG_PROTO_MTK_VALUE			9
#define DSA_TAG_PROTO_QCA_VALUE			10
#define DSA_TAG_PROTO_TRAILER_VALUE		11
#define DSA_TAG_PROTO_8021Q_VALUE		12
#define DSA_TAG_PROTO_SJA1105_VALUE		13
#define DSA_TAG_PROTO_KSZ8795_VALUE		14
#define DSA_TAG_PROTO_OCELOT_VALUE		15
#define DSA_TAG_PROTO_AR9331_VALUE		16
#define DSA_TAG_PROTO_RTL4_A_VALUE		17
#define DSA_TAG_PROTO_HELLCREEK_VALUE		18
#define DSA_TAG_PROTO_XRS700X_VALUE		19
#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE	20
#define DSA_TAG_PROTO_SEVILLE_VALUE		21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE		22
#define DSA_TAG_PROTO_SJA1110_VALUE		23
#define DSA_TAG_PROTO_RTL8_4_VALUE		24
#define DSA_TAG_PROTO_RTL8_4T_VALUE		25
#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE		26
#define DSA_TAG_PROTO_LAN937X_VALUE		27

/* Enum mirror of the _VALUE macros; this is the type used throughout the
 * DSA core and driver APIs (listed alphabetically, not by value).
 */
enum dsa_tag_protocol {
	DSA_TAG_PROTO_NONE		= DSA_TAG_PROTO_NONE_VALUE,
	DSA_TAG_PROTO_BRCM		= DSA_TAG_PROTO_BRCM_VALUE,
	DSA_TAG_PROTO_BRCM_LEGACY	= DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
	DSA_TAG_PROTO_BRCM_PREPEND	= DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
	DSA_TAG_PROTO_DSA		= DSA_TAG_PROTO_DSA_VALUE,
	DSA_TAG_PROTO_EDSA		= DSA_TAG_PROTO_EDSA_VALUE,
	DSA_TAG_PROTO_GSWIP		= DSA_TAG_PROTO_GSWIP_VALUE,
	DSA_TAG_PROTO_KSZ9477		= DSA_TAG_PROTO_KSZ9477_VALUE,
	DSA_TAG_PROTO_KSZ9893		= DSA_TAG_PROTO_KSZ9893_VALUE,
	DSA_TAG_PROTO_LAN9303		= DSA_TAG_PROTO_LAN9303_VALUE,
	DSA_TAG_PROTO_MTK		= DSA_TAG_PROTO_MTK_VALUE,
	DSA_TAG_PROTO_QCA		= DSA_TAG_PROTO_QCA_VALUE,
	DSA_TAG_PROTO_TRAILER		= DSA_TAG_PROTO_TRAILER_VALUE,
	DSA_TAG_PROTO_8021Q		= DSA_TAG_PROTO_8021Q_VALUE,
	DSA_TAG_PROTO_SJA1105		= DSA_TAG_PROTO_SJA1105_VALUE,
	DSA_TAG_PROTO_KSZ8795		= DSA_TAG_PROTO_KSZ8795_VALUE,
	DSA_TAG_PROTO_OCELOT		= DSA_TAG_PROTO_OCELOT_VALUE,
	DSA_TAG_PROTO_AR9331		= DSA_TAG_PROTO_AR9331_VALUE,
	DSA_TAG_PROTO_RTL4_A		= DSA_TAG_PROTO_RTL4_A_VALUE,
	DSA_TAG_PROTO_HELLCREEK		= DSA_TAG_PROTO_HELLCREEK_VALUE,
	DSA_TAG_PROTO_XRS700X		= DSA_TAG_PROTO_XRS700X_VALUE,
	DSA_TAG_PROTO_OCELOT_8021Q	= DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
	DSA_TAG_PROTO_SEVILLE		= DSA_TAG_PROTO_SEVILLE_VALUE,
	DSA_TAG_PROTO_SJA1110		= DSA_TAG_PROTO_SJA1110_VALUE,
	DSA_TAG_PROTO_RTL8_4		= DSA_TAG_PROTO_RTL8_4_VALUE,
	DSA_TAG_PROTO_RTL8_4T		= DSA_TAG_PROTO_RTL8_4T_VALUE,
	DSA_TAG_PROTO_RZN1_A5PSW	= DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
	DSA_TAG_PROTO_LAN937X		= DSA_TAG_PROTO_LAN937X_VALUE,
};
struct dsa_switch;

/* Operations provided by a tagging protocol driver, bound to the CPU
 * port(s) of a switch tree.
 */
struct dsa_device_ops {
	/* Tag the frame on transmit towards the switch */
	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
	/* Parse/strip the tag on receive from the switch */
	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
	/* Help the flow dissector locate the encapsulated protocol/offset */
	void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
			     int *offset);
	/* Optional hooks for the switch driver to connect to this tagger */
	int (*connect)(struct dsa_switch *ds);
	void (*disconnect)(struct dsa_switch *ds);
	/* Extra skb room this tagger needs before/after the payload */
	unsigned int needed_headroom;
	unsigned int needed_tailroom;
	const char *name;
	enum dsa_tag_protocol proto;
	/* Some tagging protocols either mangle or shift the destination MAC
	 * address, in which case the DSA master would drop packets on ingress
	 * if what it understands out of the destination MAC address is not in
	 * its RX filter.
	 */
	bool promisc_on_master;
};
/* This structure defines the control interfaces that are overlayed by the
 * DSA layer on top of the DSA CPU/management net_device instance. This is
 * used by the core net_device layer while calling various net_device_ops
 * function pointers.
 */
struct dsa_netdevice_ops {
	int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr,
			     int cmd);
};

/* Module alias prefix used for tagging protocol driver autoloading; the
 * alias is built from the protocol's numeric _VALUE macro (stringified).
 */
#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto)				\
	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
/* State kept for one offloaded link aggregation group (LAG).
 * Refcounted: shared by all ports joined to the same LAG netdev.
 */
struct dsa_lag {
	struct net_device *dev;		/* LAG (bonding/team) netdevice */
	unsigned int id;		/* one-based linear ID for drivers */
	struct mutex fdb_lock;		/* protects @fdbs */
	struct list_head fdbs;		/* FDB entries on this LAG */
	refcount_t refcount;
};
/* A tree of one or more interconnected switches sharing a CPU port. */
struct dsa_switch_tree {
	struct list_head list;

	/* List of switch ports */
	struct list_head ports;

	/* Notifier chain for switch-wide events */
	struct raw_notifier_head nh;

	/* Tree identifier */
	unsigned int index;

	/* Number of switches attached to this tree */
	struct kref refcount;

	/* Maps offloaded LAG netdevs to a zero-based linear ID for
	 * drivers that need it.
	 */
	struct dsa_lag **lags;

	/* Tagging protocol operations */
	const struct dsa_device_ops *tag_ops;

	/* Default tagging protocol preferred by the switches in this
	 * tree.
	 */
	enum dsa_tag_protocol default_proto;

	/* Has this tree been applied to the hardware? */
	bool setup;

	/*
	 * Configuration data for the platform device that owns
	 * this dsa switch tree instance.
	 */
	struct dsa_platform_data *pd;

	/* List of DSA links composing the routing table */
	struct list_head rtable;

	/* Length of "lags" array */
	unsigned int lags_len;

	/* Track the largest switch index within a tree */
	unsigned int last_switch;
};
/* LAG IDs are one-based, the dst->lags array is zero-based */
#define dsa_lags_foreach_id(_id, _dst)				\
	for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++)	\
		if ((_dst)->lags[(_id) - 1])

/* Iterate over the ports of @_dst that offload LAG @_lag */
#define dsa_lag_foreach_port(_dp, _dst, _lag)			\
	list_for_each_entry((_dp), &(_dst)->ports, list)	\
		if (dsa_port_offloads_lag((_dp), (_lag)))

/* Iterate over the ports of switch @_ds attached to HSR device @_hsr */
#define dsa_hsr_foreach_port(_dp, _ds, _hsr)			\
	list_for_each_entry((_dp), &(_ds)->dst->ports, list)	\
		if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
  166. static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
  167. unsigned int id)
  168. {
  169. /* DSA LAG IDs are one-based, dst->lags is zero-based */
  170. return dst->lags[id - 1];
  171. }
  172. static inline int dsa_lag_id(struct dsa_switch_tree *dst,
  173. struct net_device *lag_dev)
  174. {
  175. unsigned int id;
  176. dsa_lags_foreach_id(id, dst) {
  177. struct dsa_lag *lag = dsa_lag_by_id(dst, id);
  178. if (lag->dev == lag_dev)
  179. return lag->id;
  180. }
  181. return -ENODEV;
  182. }
/* TC matchall action types */
enum dsa_port_mall_action_type {
	DSA_PORT_MALL_MIRROR,
	DSA_PORT_MALL_POLICER,
};

/* TC mirroring entry */
struct dsa_mall_mirror_tc_entry {
	u8 to_local_port;	/* local port mirrored traffic is sent to */
	bool ingress;		/* mirror ingress (true) or egress traffic */
};

/* TC port policer entry */
struct dsa_mall_policer_tc_entry {
	u32 burst;
	u64 rate_bytes_per_sec;
};

/* TC matchall entry */
struct dsa_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;	/* identifies the rule on deletion */
	enum dsa_port_mall_action_type type;
	union {
		struct dsa_mall_mirror_tc_entry mirror;
		struct dsa_mall_policer_tc_entry policer;
	};
};

/* State kept for one offloaded bridge; refcounted across member ports. */
struct dsa_bridge {
	struct net_device *dev;
	unsigned int num;	/* driver-visible bridge number */
	bool tx_fwd_offload;
	refcount_t refcount;
};
/* One port of a switch in the tree: either unused, a CPU port, a DSA
 * (inter-switch) link, or a user-facing port.
 */
struct dsa_port {
	/* A CPU port is physically connected to a master device.
	 * A user port exposed to userspace has a slave device.
	 */
	union {
		struct net_device *master;
		struct net_device *slave;
	};

	/* Copy of the tagging protocol operations, for quicker access
	 * in the data path. Valid only for the CPU ports.
	 */
	const struct dsa_device_ops *tag_ops;

	/* Copies for faster access in master receive hot path */
	struct dsa_switch_tree *dst;
	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);

	struct dsa_switch *ds;

	unsigned int index;	/* port number within @ds */

	enum {
		DSA_PORT_TYPE_UNUSED = 0,
		DSA_PORT_TYPE_CPU,
		DSA_PORT_TYPE_DSA,
		DSA_PORT_TYPE_USER,
	} type;

	const char *name;
	struct dsa_port *cpu_dp;	/* dedicated CPU port for this port */
	u8 mac[ETH_ALEN];

	u8 stp_state;

	/* Warning: the following bit fields are not atomic, and updating them
	 * can only be done from code paths where concurrency is not possible
	 * (probe time or under rtnl_lock).
	 */
	u8 vlan_filtering:1;

	/* Managed by DSA on user ports and by drivers on CPU and DSA ports */
	u8 learning:1;

	u8 lag_tx_enabled:1;

	/* Master state bits, valid only on CPU ports */
	u8 master_admin_up:1;
	u8 master_oper_up:1;

	/* Valid only on user ports */
	u8 cpu_port_in_lag:1;

	u8 setup:1;

	struct device_node *dn;		/* OF node describing this port */
	unsigned int ageing_time;
	struct dsa_bridge *bridge;	/* bridge this port is part of, or NULL */
	struct devlink_port devlink_port;
	struct phylink *pl;
	struct phylink_config pl_config;
	struct dsa_lag *lag;		/* LAG this port is part of, or NULL */
	struct net_device *hsr_dev;	/* HSR device this port offloads, or NULL */

	struct list_head list;		/* entry in dst->ports */

	/*
	 * Original copy of the master netdev ethtool_ops
	 */
	const struct ethtool_ops *orig_ethtool_ops;

	/*
	 * Original copy of the master netdev net_device_ops
	 */
	const struct dsa_netdevice_ops *netdev_ops;

	/* List of MAC addresses that must be forwarded on this port.
	 * These are only valid on CPU ports and DSA links.
	 */
	struct mutex addr_lists_lock;
	struct list_head fdbs;
	struct list_head mdbs;

	/* List of VLANs that CPU and DSA ports are members of. */
	struct mutex vlans_lock;
	struct list_head vlans;
};
/* TODO: ideally DSA ports would have a single dp->link_dp member,
 * and no dst->rtable nor this struct dsa_link would be needed,
 * but this would require some more complex tree walking,
 * so keep it stupid at the moment and list them all.
 */
struct dsa_link {
	struct dsa_port *dp;		/* local end of the link */
	struct dsa_port *link_dp;	/* remote end of the link */
	struct list_head list;		/* entry in dst->rtable */
};
/* Which forwarding database an address entry belongs to. */
enum dsa_db_type {
	DSA_DB_PORT,	/* standalone port database */
	DSA_DB_LAG,	/* LAG database */
	DSA_DB_BRIDGE,	/* bridge database */
};

struct dsa_db {
	enum dsa_db_type type;

	union {
		const struct dsa_port *dp;
		struct dsa_lag lag;
		struct dsa_bridge bridge;
	};
};

/* Refcounted FDB/MDB address entry tracked on CPU ports and DSA links. */
struct dsa_mac_addr {
	unsigned char addr[ETH_ALEN];
	u16 vid;
	refcount_t refcount;
	struct list_head list;
	struct dsa_db db;	/* database this address was learned into */
};

/* Refcounted VLAN membership entry for CPU and DSA ports. */
struct dsa_vlan {
	u16 vid;
	refcount_t refcount;
	struct list_head list;
};
/* One switch chip within a switch tree. */
struct dsa_switch {
	struct device *dev;

	/*
	 * Parent switch tree, and switch index.
	 */
	struct dsa_switch_tree *dst;
	unsigned int index;

	/* Warning: the following bit fields are not atomic, and updating them
	 * can only be done from code paths where concurrency is not possible
	 * (probe time or under rtnl_lock).
	 */
	u32 setup:1;

	/* Disallow bridge core from requesting different VLAN awareness
	 * settings on ports if not hardware-supported
	 */
	u32 vlan_filtering_is_global:1;

	/* Keep VLAN filtering enabled on ports not offloading any upper */
	u32 needs_standalone_vlan_filtering:1;

	/* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
	 * that have vlan_filtering=0. All drivers should ideally set this (and
	 * then the option would get removed), but it is unknown whether this
	 * would break things or not.
	 */
	u32 configure_vlan_while_not_filtering:1;

	/* If the switch driver always programs the CPU port as egress tagged
	 * despite the VLAN configuration indicating otherwise, then setting
	 * @untag_bridge_pvid will force the DSA receive path to pop the
	 * bridge's default_pvid VLAN tagged frames to offer a consistent
	 * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
	 * device.
	 */
	u32 untag_bridge_pvid:1;

	/* Let DSA manage the FDB entries towards the
	 * CPU, based on the software bridge database.
	 */
	u32 assisted_learning_on_cpu_port:1;

	/* In case vlan_filtering_is_global is set, the VLAN awareness state
	 * should be retrieved from here and not from the per-port settings.
	 */
	u32 vlan_filtering:1;

	/* For switches that only have the MRU configurable. To ensure the
	 * configured MTU is not exceeded, normalization of MRU on all bridged
	 * interfaces is needed.
	 */
	u32 mtu_enforcement_ingress:1;

	/* Drivers that isolate the FDBs of multiple bridges must set this
	 * to true to receive the bridge as an argument in .port_fdb_{add,del}
	 * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be
	 * passed as zero.
	 */
	u32 fdb_isolation:1;

	/* Listener for switch fabric events */
	struct notifier_block nb;

	/*
	 * Give the switch driver somewhere to hang its private data
	 * structure.
	 */
	void *priv;

	void *tagger_data;

	/*
	 * Configuration data for this switch.
	 */
	struct dsa_chip_data *cd;

	/*
	 * The switch operations.
	 */
	const struct dsa_switch_ops *ops;

	/*
	 * Slave mii_bus and devices for the individual ports.
	 */
	u32 phys_mii_mask;
	struct mii_bus *slave_mii_bus;

	/* Ageing Time limits in msecs */
	unsigned int ageing_time_min;
	unsigned int ageing_time_max;

	/* Storage for drivers using tag_8021q */
	struct dsa_8021q_context *tag_8021q_ctx;

	/* devlink used to represent this switch device */
	struct devlink *devlink;

	/* Number of switch port queues */
	unsigned int num_tx_queues;

	/* Drivers that benefit from having an ID associated with each
	 * offloaded LAG should set this to the maximum number of
	 * supported IDs. DSA will then maintain a mapping of _at
	 * least_ these many IDs, accessible to drivers via
	 * dsa_lag_id().
	 */
	unsigned int num_lag_ids;

	/* Drivers that support bridge forwarding offload or FDB isolation
	 * should set this to the maximum number of bridges spanning the same
	 * switch tree (or all trees, in the case of cross-tree bridging
	 * support) that can be offloaded.
	 */
	unsigned int max_num_bridges;

	unsigned int num_ports;
};
  413. static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
  414. {
  415. struct dsa_switch_tree *dst = ds->dst;
  416. struct dsa_port *dp;
  417. list_for_each_entry(dp, &dst->ports, list)
  418. if (dp->ds == ds && dp->index == p)
  419. return dp;
  420. return NULL;
  421. }
  422. static inline bool dsa_port_is_dsa(struct dsa_port *port)
  423. {
  424. return port->type == DSA_PORT_TYPE_DSA;
  425. }
  426. static inline bool dsa_port_is_cpu(struct dsa_port *port)
  427. {
  428. return port->type == DSA_PORT_TYPE_CPU;
  429. }
  430. static inline bool dsa_port_is_user(struct dsa_port *dp)
  431. {
  432. return dp->type == DSA_PORT_TYPE_USER;
  433. }
  434. static inline bool dsa_port_is_unused(struct dsa_port *dp)
  435. {
  436. return dp->type == DSA_PORT_TYPE_UNUSED;
  437. }
  438. static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
  439. {
  440. return dsa_port_is_cpu(dp) && dp->master_admin_up &&
  441. dp->master_oper_up;
  442. }
  443. static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
  444. {
  445. return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
  446. }
  447. static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
  448. {
  449. return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
  450. }
  451. static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
  452. {
  453. return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
  454. }
  455. static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
  456. {
  457. return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
  458. }
/* Iterators over the ports of a tree, filtered by port type. */
#define dsa_tree_for_each_user_port(_dp, _dst) \
	list_for_each_entry((_dp), &(_dst)->ports, list) \
		if (dsa_port_is_user((_dp)))

#define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \
	list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \
		if (dsa_port_is_user((_dp)))

#define dsa_tree_for_each_cpu_port(_dp, _dst) \
	list_for_each_entry((_dp), &(_dst)->ports, list) \
		if (dsa_port_is_cpu((_dp)))

/* Iterators over the ports belonging to a single switch of the tree. */
#define dsa_switch_for_each_port(_dp, _ds) \
	list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
		if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
	list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
		if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
	list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
		if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_available_port(_dp, _ds) \
	dsa_switch_for_each_port((_dp), (_ds)) \
		if (!dsa_port_is_unused((_dp)))

#define dsa_switch_for_each_user_port(_dp, _ds) \
	dsa_switch_for_each_port((_dp), (_ds)) \
		if (dsa_port_is_user((_dp)))

#define dsa_switch_for_each_cpu_port(_dp, _ds) \
	dsa_switch_for_each_port((_dp), (_ds)) \
		if (dsa_port_is_cpu((_dp)))

#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
	dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
		if (dsa_port_is_cpu((_dp)))
  489. static inline u32 dsa_user_ports(struct dsa_switch *ds)
  490. {
  491. struct dsa_port *dp;
  492. u32 mask = 0;
  493. dsa_switch_for_each_user_port(dp, ds)
  494. mask |= BIT(dp->index);
  495. return mask;
  496. }
  497. static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
  498. {
  499. struct dsa_port *cpu_dp;
  500. u32 mask = 0;
  501. dsa_switch_for_each_cpu_port(cpu_dp, ds)
  502. mask |= BIT(cpu_dp->index);
  503. return mask;
  504. }
  505. /* Return the local port used to reach an arbitrary switch device */
  506. static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
  507. {
  508. struct dsa_switch_tree *dst = ds->dst;
  509. struct dsa_link *dl;
  510. list_for_each_entry(dl, &dst->rtable, list)
  511. if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
  512. return dl->dp->index;
  513. return ds->num_ports;
  514. }
  515. /* Return the local port used to reach an arbitrary switch port */
  516. static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
  517. int port)
  518. {
  519. if (device == ds->index)
  520. return port;
  521. else
  522. return dsa_routing_port(ds, device);
  523. }
  524. /* Return the local port used to reach the dedicated CPU port */
  525. static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
  526. {
  527. const struct dsa_port *dp = dsa_to_port(ds, port);
  528. const struct dsa_port *cpu_dp = dp->cpu_dp;
  529. if (!cpu_dp)
  530. return port;
  531. return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
  532. }
  533. /* Return true if this is the local port used to reach the CPU port */
  534. static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
  535. {
  536. if (dsa_is_unused_port(ds, port))
  537. return false;
  538. return port == dsa_upstream_port(ds, port);
  539. }
  540. /* Return true if this is a DSA port leading away from the CPU */
  541. static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port)
  542. {
  543. return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
  544. }
  545. /* Return the local port used to reach the CPU port */
  546. static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds)
  547. {
  548. struct dsa_port *dp;
  549. dsa_switch_for_each_available_port(dp, ds) {
  550. return dsa_upstream_port(ds, dp->index);
  551. }
  552. return ds->num_ports;
  553. }
  554. /* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
  555. * that the routing port from @downstream_ds to @upstream_ds is also the port
  556. * which @downstream_ds uses to reach its dedicated CPU.
  557. */
  558. static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
  559. struct dsa_switch *downstream_ds)
  560. {
  561. int routing_port;
  562. if (upstream_ds == downstream_ds)
  563. return true;
  564. routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
  565. return dsa_is_upstream_port(downstream_ds, routing_port);
  566. }
  567. static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
  568. {
  569. const struct dsa_switch *ds = dp->ds;
  570. if (ds->vlan_filtering_is_global)
  571. return ds->vlan_filtering;
  572. else
  573. return dp->vlan_filtering;
  574. }
  575. static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
  576. {
  577. return dp->lag ? dp->lag->id : 0;
  578. }
  579. static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
  580. {
  581. return dp->lag ? dp->lag->dev : NULL;
  582. }
  583. static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
  584. const struct dsa_lag *lag)
  585. {
  586. return dsa_port_lag_dev_get(dp) == lag->dev;
  587. }
  588. static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
  589. {
  590. if (dp->cpu_port_in_lag)
  591. return dsa_port_lag_dev_get(dp->cpu_dp);
  592. return dp->cpu_dp->master;
  593. }
  594. static inline
  595. struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
  596. {
  597. if (!dp->bridge)
  598. return NULL;
  599. if (dp->lag)
  600. return dp->lag->dev;
  601. else if (dp->hsr_dev)
  602. return dp->hsr_dev;
  603. return dp->slave;
  604. }
  605. static inline struct net_device *
  606. dsa_port_bridge_dev_get(const struct dsa_port *dp)
  607. {
  608. return dp->bridge ? dp->bridge->dev : NULL;
  609. }
  610. static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp)
  611. {
  612. return dp->bridge ? dp->bridge->num : 0;
  613. }
  614. static inline bool dsa_port_bridge_same(const struct dsa_port *a,
  615. const struct dsa_port *b)
  616. {
  617. struct net_device *br_a = dsa_port_bridge_dev_get(a);
  618. struct net_device *br_b = dsa_port_bridge_dev_get(b);
  619. /* Standalone ports are not in the same bridge with one another */
  620. return (!br_a || !br_b) ? false : (br_a == br_b);
  621. }
  622. static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
  623. const struct net_device *dev)
  624. {
  625. return dsa_port_to_bridge_port(dp) == dev;
  626. }
  627. static inline bool
  628. dsa_port_offloads_bridge_dev(struct dsa_port *dp,
  629. const struct net_device *bridge_dev)
  630. {
  631. /* DSA ports connected to a bridge, and event was emitted
  632. * for the bridge.
  633. */
  634. return dsa_port_bridge_dev_get(dp) == bridge_dev;
  635. }
  636. static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
  637. const struct dsa_bridge *bridge)
  638. {
  639. return dsa_port_bridge_dev_get(dp) == bridge->dev;
  640. }
  641. /* Returns true if any port of this tree offloads the given net_device */
  642. static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
  643. const struct net_device *dev)
  644. {
  645. struct dsa_port *dp;
  646. list_for_each_entry(dp, &dst->ports, list)
  647. if (dsa_port_offloads_bridge_port(dp, dev))
  648. return true;
  649. return false;
  650. }
  651. /* Returns true if any port of this tree offloads the given bridge */
  652. static inline bool
  653. dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
  654. const struct net_device *bridge_dev)
  655. {
  656. struct dsa_port *dp;
  657. list_for_each_entry(dp, &dst->ports, list)
  658. if (dsa_port_offloads_bridge_dev(dp, bridge_dev))
  659. return true;
  660. return false;
  661. }
  662. static inline bool dsa_port_tree_same(const struct dsa_port *a,
  663. const struct dsa_port *b)
  664. {
  665. return a->ds->dst == b->ds->dst;
  666. }
/* Callback invoked once per FDB entry during an FDB dump operation. */
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
			      bool is_static, void *data);
/* Operations vector implemented by DSA switch drivers. The DSA core calls
 * into these hooks to program the hardware; the section comments below group
 * them by subsystem. NOTE(review): which hooks are mandatory vs optional is
 * not visible from this header alone (apart from @get_tag_protocol, which is
 * documented as mandatory) — confirm against the DSA core before relying on
 * a hook being optional.
 */
struct dsa_switch_ops {
	/*
	 * Tagging protocol helpers called for the CPU ports and DSA links.
	 * @get_tag_protocol retrieves the initial tagging protocol and is
	 * mandatory. Switches which can operate using multiple tagging
	 * protocols should implement @change_tag_protocol and report in
	 * @get_tag_protocol the tagger in current use.
	 */
	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
						  int port,
						  enum dsa_tag_protocol mprot);
	int	(*change_tag_protocol)(struct dsa_switch *ds,
				       enum dsa_tag_protocol proto);
	/*
	 * Method for switch drivers to connect to the tagging protocol driver
	 * in current use. The switch driver can provide handlers for certain
	 * types of packets for switch management.
	 */
	int	(*connect_tag_protocol)(struct dsa_switch *ds,
					enum dsa_tag_protocol proto);

	/* Called when a user port migrates to a different CPU port (master). */
	int	(*port_change_master)(struct dsa_switch *ds, int port,
				      struct net_device *master,
				      struct netlink_ext_ack *extack);

	/* Optional switch-wide initialization and destruction methods */
	int	(*setup)(struct dsa_switch *ds);
	void	(*teardown)(struct dsa_switch *ds);

	/* Per-port initialization and destruction methods. Mandatory if the
	 * driver registers devlink port regions, optional otherwise.
	 */
	int	(*port_setup)(struct dsa_switch *ds, int port);
	void	(*port_teardown)(struct dsa_switch *ds, int port);

	u32	(*get_phy_flags)(struct dsa_switch *ds, int port);

	/*
	 * Access to the switch's PHY registers.
	 */
	int	(*phy_read)(struct dsa_switch *ds, int port, int regnum);
	int	(*phy_write)(struct dsa_switch *ds, int port,
			     int regnum, u16 val);

	/*
	 * Link state adjustment (called from libphy)
	 */
	void	(*adjust_link)(struct dsa_switch *ds, int port,
			       struct phy_device *phydev);
	void	(*fixed_link_update)(struct dsa_switch *ds, int port,
				     struct fixed_phy_status *st);

	/*
	 * PHYLINK integration
	 */
	void	(*phylink_get_caps)(struct dsa_switch *ds, int port,
				    struct phylink_config *config);
	void	(*phylink_validate)(struct dsa_switch *ds, int port,
				    unsigned long *supported,
				    struct phylink_link_state *state);
	struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
						      int port,
						      phy_interface_t iface);
	int	(*phylink_mac_link_state)(struct dsa_switch *ds, int port,
					  struct phylink_link_state *state);
	void	(*phylink_mac_config)(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      const struct phylink_link_state *state);
	void	(*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
	void	(*phylink_mac_link_down)(struct dsa_switch *ds, int port,
					 unsigned int mode,
					 phy_interface_t interface);
	void	(*phylink_mac_link_up)(struct dsa_switch *ds, int port,
				       unsigned int mode,
				       phy_interface_t interface,
				       struct phy_device *phydev,
				       int speed, int duplex,
				       bool tx_pause, bool rx_pause);
	void	(*phylink_fixed_state)(struct dsa_switch *ds, int port,
				       struct phylink_link_state *state);

	/*
	 * Port statistics counters.
	 */
	void	(*get_strings)(struct dsa_switch *ds, int port,
			       u32 stringset, uint8_t *data);
	void	(*get_ethtool_stats)(struct dsa_switch *ds,
				     int port, uint64_t *data);
	int	(*get_sset_count)(struct dsa_switch *ds, int port, int sset);
	void	(*get_ethtool_phy_stats)(struct dsa_switch *ds,
					 int port, uint64_t *data);
	void	(*get_eth_phy_stats)(struct dsa_switch *ds, int port,
				     struct ethtool_eth_phy_stats *phy_stats);
	void	(*get_eth_mac_stats)(struct dsa_switch *ds, int port,
				     struct ethtool_eth_mac_stats *mac_stats);
	void	(*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
				      struct ethtool_eth_ctrl_stats *ctrl_stats);
	void	(*get_rmon_stats)(struct dsa_switch *ds, int port,
				  struct ethtool_rmon_stats *rmon_stats,
				  const struct ethtool_rmon_hist_range **ranges);
	void	(*get_stats64)(struct dsa_switch *ds, int port,
			       struct rtnl_link_stats64 *s);
	void	(*get_pause_stats)(struct dsa_switch *ds, int port,
				   struct ethtool_pause_stats *pause_stats);
	void	(*self_test)(struct dsa_switch *ds, int port,
			     struct ethtool_test *etest, u64 *data);

	/*
	 * ethtool Wake-on-LAN
	 */
	void	(*get_wol)(struct dsa_switch *ds, int port,
			   struct ethtool_wolinfo *w);
	int	(*set_wol)(struct dsa_switch *ds, int port,
			   struct ethtool_wolinfo *w);

	/*
	 * ethtool timestamp info
	 */
	int	(*get_ts_info)(struct dsa_switch *ds, int port,
			       struct ethtool_ts_info *ts);

	/*
	 * DCB ops
	 */
	int	(*port_get_default_prio)(struct dsa_switch *ds, int port);
	int	(*port_set_default_prio)(struct dsa_switch *ds, int port,
					 u8 prio);
	int	(*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp);
	int	(*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
				      u8 prio);
	int	(*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
				      u8 prio);

	/*
	 * Suspend and resume
	 */
	int	(*suspend)(struct dsa_switch *ds);
	int	(*resume)(struct dsa_switch *ds);

	/*
	 * Port enable/disable
	 */
	int	(*port_enable)(struct dsa_switch *ds, int port,
			       struct phy_device *phy);
	void	(*port_disable)(struct dsa_switch *ds, int port);

	/*
	 * Port's MAC EEE settings
	 */
	int	(*set_mac_eee)(struct dsa_switch *ds, int port,
			       struct ethtool_eee *e);
	int	(*get_mac_eee)(struct dsa_switch *ds, int port,
			       struct ethtool_eee *e);

	/* EEPROM access */
	int	(*get_eeprom_len)(struct dsa_switch *ds);
	int	(*get_eeprom)(struct dsa_switch *ds,
			      struct ethtool_eeprom *eeprom, u8 *data);
	int	(*set_eeprom)(struct dsa_switch *ds,
			      struct ethtool_eeprom *eeprom, u8 *data);

	/*
	 * Register access.
	 */
	int	(*get_regs_len)(struct dsa_switch *ds, int port);
	void	(*get_regs)(struct dsa_switch *ds, int port,
			    struct ethtool_regs *regs, void *p);

	/*
	 * Upper device tracking.
	 */
	int	(*port_prechangeupper)(struct dsa_switch *ds, int port,
				       struct netdev_notifier_changeupper_info *info);

	/*
	 * Bridge integration
	 */
	int	(*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
	int	(*port_bridge_join)(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge,
				    bool *tx_fwd_offload,
				    struct netlink_ext_ack *extack);
	void	(*port_bridge_leave)(struct dsa_switch *ds, int port,
				     struct dsa_bridge bridge);
	void	(*port_stp_state_set)(struct dsa_switch *ds, int port,
				      u8 state);
	int	(*port_mst_state_set)(struct dsa_switch *ds, int port,
				      const struct switchdev_mst_state *state);
	void	(*port_fast_age)(struct dsa_switch *ds, int port);
	int	(*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid);
	int	(*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
					 struct switchdev_brport_flags flags,
					 struct netlink_ext_ack *extack);
	int	(*port_bridge_flags)(struct dsa_switch *ds, int port,
				     struct switchdev_brport_flags flags,
				     struct netlink_ext_ack *extack);
	void	(*port_set_host_flood)(struct dsa_switch *ds, int port,
				       bool uc, bool mc);

	/*
	 * VLAN support
	 */
	int	(*port_vlan_filtering)(struct dsa_switch *ds, int port,
				       bool vlan_filtering,
				       struct netlink_ext_ack *extack);
	int	(*port_vlan_add)(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_vlan *vlan,
				 struct netlink_ext_ack *extack);
	int	(*port_vlan_del)(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_vlan *vlan);
	int	(*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge,
				 const struct switchdev_vlan_msti *msti);

	/*
	 * Forwarding database
	 */
	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
				const unsigned char *addr, u16 vid,
				struct dsa_db db);
	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
				const unsigned char *addr, u16 vid,
				struct dsa_db db);
	int	(*port_fdb_dump)(struct dsa_switch *ds, int port,
				 dsa_fdb_dump_cb_t *cb, void *data);
	int	(*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
			       const unsigned char *addr, u16 vid,
			       struct dsa_db db);
	int	(*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
			       const unsigned char *addr, u16 vid,
			       struct dsa_db db);

	/*
	 * Multicast database
	 */
	int	(*port_mdb_add)(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_mdb *mdb,
				struct dsa_db db);
	int	(*port_mdb_del)(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_mdb *mdb,
				struct dsa_db db);

	/*
	 * RXNFC
	 */
	int	(*get_rxnfc)(struct dsa_switch *ds, int port,
			     struct ethtool_rxnfc *nfc, u32 *rule_locs);
	int	(*set_rxnfc)(struct dsa_switch *ds, int port,
			     struct ethtool_rxnfc *nfc);

	/*
	 * TC integration
	 */
	int	(*cls_flower_add)(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress);
	int	(*cls_flower_del)(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress);
	int	(*cls_flower_stats)(struct dsa_switch *ds, int port,
				    struct flow_cls_offload *cls, bool ingress);
	int	(*port_mirror_add)(struct dsa_switch *ds, int port,
				   struct dsa_mall_mirror_tc_entry *mirror,
				   bool ingress, struct netlink_ext_ack *extack);
	void	(*port_mirror_del)(struct dsa_switch *ds, int port,
				   struct dsa_mall_mirror_tc_entry *mirror);
	int	(*port_policer_add)(struct dsa_switch *ds, int port,
				    struct dsa_mall_policer_tc_entry *policer);
	void	(*port_policer_del)(struct dsa_switch *ds, int port);
	int	(*port_setup_tc)(struct dsa_switch *ds, int port,
				 enum tc_setup_type type, void *type_data);

	/*
	 * Cross-chip operations
	 */
	int	(*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
					 int sw_index, int port,
					 struct dsa_bridge bridge,
					 struct netlink_ext_ack *extack);
	void	(*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
					  int sw_index, int port,
					  struct dsa_bridge bridge);
	int	(*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
					int port);
	int	(*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
				      int port, struct dsa_lag lag,
				      struct netdev_lag_upper_info *info,
				      struct netlink_ext_ack *extack);
	int	(*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
				       int port, struct dsa_lag lag);

	/*
	 * PTP functionality
	 */
	int	(*port_hwtstamp_get)(struct dsa_switch *ds, int port,
				     struct ifreq *ifr);
	int	(*port_hwtstamp_set)(struct dsa_switch *ds, int port,
				     struct ifreq *ifr);
	void	(*port_txtstamp)(struct dsa_switch *ds, int port,
				 struct sk_buff *skb);
	bool	(*port_rxtstamp)(struct dsa_switch *ds, int port,
				 struct sk_buff *skb, unsigned int type);

	/* Devlink parameters, etc */
	int	(*devlink_param_get)(struct dsa_switch *ds, u32 id,
				     struct devlink_param_gset_ctx *ctx);
	int	(*devlink_param_set)(struct dsa_switch *ds, u32 id,
				     struct devlink_param_gset_ctx *ctx);
	int	(*devlink_info_get)(struct dsa_switch *ds,
				    struct devlink_info_req *req,
				    struct netlink_ext_ack *extack);
	int	(*devlink_sb_pool_get)(struct dsa_switch *ds,
				       unsigned int sb_index, u16 pool_index,
				       struct devlink_sb_pool_info *pool_info);
	int	(*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
				       u16 pool_index, u32 size,
				       enum devlink_sb_threshold_type threshold_type,
				       struct netlink_ext_ack *extack);
	int	(*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
					    unsigned int sb_index, u16 pool_index,
					    u32 *p_threshold);
	int	(*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
					    unsigned int sb_index, u16 pool_index,
					    u32 threshold,
					    struct netlink_ext_ack *extack);
	int	(*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
					       unsigned int sb_index, u16 tc_index,
					       enum devlink_sb_pool_type pool_type,
					       u16 *p_pool_index, u32 *p_threshold);
	int	(*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
					       unsigned int sb_index, u16 tc_index,
					       enum devlink_sb_pool_type pool_type,
					       u16 pool_index, u32 threshold,
					       struct netlink_ext_ack *extack);
	int	(*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
					   unsigned int sb_index);
	int	(*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
					    unsigned int sb_index);
	int	(*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
						unsigned int sb_index, u16 pool_index,
						u32 *p_cur, u32 *p_max);
	int	(*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
						   unsigned int sb_index, u16 tc_index,
						   enum devlink_sb_pool_type pool_type,
						   u32 *p_cur, u32 *p_max);

	/*
	 * MTU change functionality. Switches can also adjust their MRU through
	 * this method. By MTU, one understands the SDU (L2 payload) length.
	 * If the switch needs to account for the DSA tag on the CPU port, this
	 * method needs to do so privately.
	 */
	int	(*port_change_mtu)(struct dsa_switch *ds, int port,
				   int new_mtu);
	int	(*port_max_mtu)(struct dsa_switch *ds, int port);

	/*
	 * LAG integration
	 */
	int	(*port_lag_change)(struct dsa_switch *ds, int port);
	int	(*port_lag_join)(struct dsa_switch *ds, int port,
				 struct dsa_lag lag,
				 struct netdev_lag_upper_info *info,
				 struct netlink_ext_ack *extack);
	int	(*port_lag_leave)(struct dsa_switch *ds, int port,
				  struct dsa_lag lag);

	/*
	 * HSR integration
	 */
	int	(*port_hsr_join)(struct dsa_switch *ds, int port,
				 struct net_device *hsr);
	int	(*port_hsr_leave)(struct dsa_switch *ds, int port,
				  struct net_device *hsr);

	/*
	 * MRP integration
	 */
	int	(*port_mrp_add)(struct dsa_switch *ds, int port,
				const struct switchdev_obj_mrp *mrp);
	int	(*port_mrp_del)(struct dsa_switch *ds, int port,
				const struct switchdev_obj_mrp *mrp);
	int	(*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
					  const struct switchdev_obj_ring_role_mrp *mrp);
	int	(*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
					  const struct switchdev_obj_ring_role_mrp *mrp);

	/*
	 * tag_8021q operations
	 */
	int	(*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
				      u16 flags);
	int	(*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);

	/*
	 * DSA master tracking operations
	 */
	void	(*master_state_change)(struct dsa_switch *ds,
				       const struct net_device *master,
				       bool operational);
};
/* Declares a devlink driver parameter whose get/set callbacks are routed
 * through the DSA glue functions declared just below.
 */
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)		\
	DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes,		\
			     dsa_devlink_param_get, dsa_devlink_param_set, NULL)

int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx);
int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count);
void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count);
int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params);
void dsa_devlink_resources_unregister(struct dsa_switch *ds);
void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id);
struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size);
struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size);
void dsa_devlink_region_destroy(struct devlink_region *region);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
/* Private data stored inside a devlink instance, linking it back to the
 * dsa_switch it was created for (see dsa_devlink_to_ds()).
 */
struct dsa_devlink_priv {
	struct dsa_switch *ds;
};
  1075. static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
  1076. {
  1077. struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
  1078. return dl_priv->ds;
  1079. }
  1080. static inline
  1081. struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
  1082. {
  1083. struct devlink *dl = port->devlink;
  1084. struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
  1085. return dl_priv->ds;
  1086. }
/* Map a devlink_port back to the DSA port index it represents. */
static inline int dsa_devlink_port_to_port(struct devlink_port *port)
{
	return port->index;
}
/* Registration record binding an ops vector into the global list of DSA
 * switch drivers.
 */
struct dsa_switch_driver {
	struct list_head list;
	const struct dsa_switch_ops *ops;
};

struct net_device *dsa_dev_to_net_device(struct device *dev);

/* Per their names, these report whether the given FDB/MDB entry exists in a
 * database other than @db; implemented by the DSA core.
 */
bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db);
bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db);
  1102. /* Keep inline for faster access in hot path */
  1103. static inline bool netdev_uses_dsa(const struct net_device *dev)
  1104. {
  1105. #if IS_ENABLED(CONFIG_NET_DSA)
  1106. return dev->dsa_ptr && dev->dsa_ptr->rcv;
  1107. #endif
  1108. return false;
  1109. }
  1110. /* All DSA tags that push the EtherType to the right (basically all except tail
  1111. * tags, which don't break dissection) can be treated the same from the
  1112. * perspective of the flow dissector.
  1113. *
  1114. * We need to return:
  1115. * - offset: the (B - A) difference between:
  1116. * A. the position of the real EtherType and
  1117. * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
  1118. * after the normal EtherType was supposed to be)
  1119. * The offset in bytes is exactly equal to the tagger overhead (and half of
  1120. * that, in __be16 shorts).
  1121. *
  1122. * - proto: the value of the real EtherType.
  1123. */
  1124. static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
  1125. __be16 *proto, int *offset)
  1126. {
  1127. #if IS_ENABLED(CONFIG_NET_DSA)
  1128. const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
  1129. int tag_len = ops->needed_headroom;
  1130. *offset = tag_len;
  1131. *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
  1132. #endif
  1133. }
  1134. #if IS_ENABLED(CONFIG_NET_DSA)
  1135. static inline int __dsa_netdevice_ops_check(struct net_device *dev)
  1136. {
  1137. int err = -EOPNOTSUPP;
  1138. if (!dev->dsa_ptr)
  1139. return err;
  1140. if (!dev->dsa_ptr->netdev_ops)
  1141. return err;
  1142. return 0;
  1143. }
  1144. static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
  1145. int cmd)
  1146. {
  1147. const struct dsa_netdevice_ops *ops;
  1148. int err;
  1149. err = __dsa_netdevice_ops_check(dev);
  1150. if (err)
  1151. return err;
  1152. ops = dev->dsa_ptr->netdev_ops;
  1153. return ops->ndo_eth_ioctl(dev, ifr, cmd);
  1154. }
  1155. #else
/* DSA compiled out: the ioctl cannot be forwarded to any tagger. */
static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
				    int cmd)
{
	return -EOPNOTSUPP;
}
  1161. #endif
/* Switch lifecycle entry points implemented by the DSA core. */
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
void dsa_flush_workqueue(void);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
#else
/* System sleep compiled out: suspend/resume become successful no-ops. */
static inline int dsa_switch_suspend(struct dsa_switch *ds)
{
	return 0;
}
static inline int dsa_switch_resume(struct dsa_switch *ds)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */
#if IS_ENABLED(CONFIG_NET_DSA)
bool dsa_slave_dev_check(const struct net_device *dev);
#else
/* Without DSA support, no netdev can be a DSA slave. */
static inline bool dsa_slave_dev_check(const struct net_device *dev)
{
	return false;
}
#endif
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);

/* One registered tagging-protocol driver; @list links it into the global
 * list maintained via dsa_tag_drivers_register()/unregister() below.
 */
struct dsa_tag_driver {
	const struct dsa_device_ops *ops;
	struct list_head list;
	struct module *owner;
};

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count,
			      struct module *owner);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count);
/* Expands to module init/exit boilerplate that registers/unregisters an
 * array of tag drivers with the DSA core on load/unload.
 */
#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count)	\
static int __init dsa_tag_driver_module_init(void)			\
{									\
	dsa_tag_drivers_register(__dsa_tag_drivers_array, __count,	\
				 THIS_MODULE);				\
	return 0;							\
}									\
module_init(dsa_tag_driver_module_init);				\
									\
static void __exit dsa_tag_driver_module_exit(void)			\
{									\
	dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count);	\
}									\
module_exit(dsa_tag_driver_module_exit)

/**
 * module_dsa_tag_drivers() - Helper macro for registering DSA tag
 * drivers
 * @__ops_array: Array of tag driver structures
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_drivers(__ops_array)				\
dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))

/* Token-pastes a unique static variable name for one tag driver. */
#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops

/* Create a static structure we can build a linked list of dsa_tag
 * drivers
 */
#define DSA_TAG_DRIVER(__ops)						\
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = {		\
	.ops = &__ops,							\
}

/**
 * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
 * driver
 * @__ops: Single tag driver structures
 *
 * Helper macro for DSA tag drivers which do not do anything special
 * in module init/exit. Each module may only use this macro once, and
 * calling it replaces module_init() and module_exit().
 */
#define module_dsa_tag_driver(__ops)					\
DSA_TAG_DRIVER(__ops);							\
									\
static struct dsa_tag_driver *dsa_tag_driver_array[] = {		\
	&DSA_TAG_DRIVER_NAME(__ops)					\
};									\
module_dsa_tag_drivers(dsa_tag_driver_array)
  1249. #endif