lantiq_gswip.c 64 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs
  4. *
  5. * Copyright (C) 2010 Lantiq Deutschland
  6. * Copyright (C) 2012 John Crispin <[email protected]>
  7. * Copyright (C) 2017 - 2019 Hauke Mehrtens <[email protected]>
  8. *
  9. * The VLAN and bridge model the GSWIP hardware uses does not directly
* match the model DSA uses.
  11. *
  12. * The hardware has 64 possible table entries for bridges with one VLAN
  13. * ID, one flow id and a list of ports for each bridge. All entries which
  14. * match the same flow ID are combined in the mac learning table, they
  15. * act as one global bridge.
  16. * The hardware does not support VLAN filter on the port, but on the
  17. * bridge, this driver converts the DSA model to the hardware.
  18. *
  19. * The CPU gets all the exception frames which do not match any forwarding
  20. * rule and the CPU port is also added to all bridges. This makes it possible
  21. * to handle all the special cases easily in software.
  22. * At the initialization the driver allocates one bridge table entry for
  23. * each switch port which is used when the port is used without an
  24. * explicit bridge. This prevents the frames from being forwarded
  25. * between all LAN ports by default.
  26. */
  27. #include <linux/clk.h>
  28. #include <linux/delay.h>
  29. #include <linux/etherdevice.h>
  30. #include <linux/firmware.h>
  31. #include <linux/if_bridge.h>
  32. #include <linux/if_vlan.h>
  33. #include <linux/iopoll.h>
  34. #include <linux/mfd/syscon.h>
  35. #include <linux/module.h>
  36. #include <linux/of_mdio.h>
  37. #include <linux/of_net.h>
  38. #include <linux/of_platform.h>
  39. #include <linux/phy.h>
  40. #include <linux/phylink.h>
  41. #include <linux/platform_device.h>
  42. #include <linux/regmap.h>
  43. #include <linux/reset.h>
  44. #include <net/dsa.h>
  45. #include <dt-bindings/mips/lantiq_rcu_gphy.h>
  46. #include "lantiq_pce.h"
  47. /* GSWIP MDIO Registers */
  48. #define GSWIP_MDIO_GLOB 0x00
  49. #define GSWIP_MDIO_GLOB_ENABLE BIT(15)
  50. #define GSWIP_MDIO_CTRL 0x08
  51. #define GSWIP_MDIO_CTRL_BUSY BIT(12)
  52. #define GSWIP_MDIO_CTRL_RD BIT(11)
  53. #define GSWIP_MDIO_CTRL_WR BIT(10)
  54. #define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
  55. #define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
  56. #define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
  57. #define GSWIP_MDIO_READ 0x09
  58. #define GSWIP_MDIO_WRITE 0x0A
  59. #define GSWIP_MDIO_MDC_CFG0 0x0B
  60. #define GSWIP_MDIO_MDC_CFG1 0x0C
  61. #define GSWIP_MDIO_PHYp(p) (0x15 - (p))
  62. #define GSWIP_MDIO_PHY_LINK_MASK 0x6000
  63. #define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
  64. #define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
  65. #define GSWIP_MDIO_PHY_LINK_UP 0x2000
  66. #define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
  67. #define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
  68. #define GSWIP_MDIO_PHY_SPEED_M10 0x0000
  69. #define GSWIP_MDIO_PHY_SPEED_M100 0x0800
  70. #define GSWIP_MDIO_PHY_SPEED_G1 0x1000
  71. #define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
  72. #define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
  73. #define GSWIP_MDIO_PHY_FDUP_EN 0x0200
  74. #define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
  75. #define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
  76. #define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
  77. #define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
  78. #define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
  79. #define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
  80. #define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
  81. #define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
  82. #define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
  83. #define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
  84. #define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
  85. GSWIP_MDIO_PHY_FCONRX_MASK | \
  86. GSWIP_MDIO_PHY_FCONTX_MASK | \
  87. GSWIP_MDIO_PHY_LINK_MASK | \
  88. GSWIP_MDIO_PHY_SPEED_MASK | \
  89. GSWIP_MDIO_PHY_FDUP_MASK)
  90. /* GSWIP MII Registers */
  91. #define GSWIP_MII_CFGp(p) (0x2 * (p))
  92. #define GSWIP_MII_CFG_RESET BIT(15)
  93. #define GSWIP_MII_CFG_EN BIT(14)
  94. #define GSWIP_MII_CFG_ISOLATE BIT(13)
  95. #define GSWIP_MII_CFG_LDCLKDIS BIT(12)
  96. #define GSWIP_MII_CFG_RGMII_IBS BIT(8)
  97. #define GSWIP_MII_CFG_RMII_CLK BIT(7)
  98. #define GSWIP_MII_CFG_MODE_MIIP 0x0
  99. #define GSWIP_MII_CFG_MODE_MIIM 0x1
  100. #define GSWIP_MII_CFG_MODE_RMIIP 0x2
  101. #define GSWIP_MII_CFG_MODE_RMIIM 0x3
  102. #define GSWIP_MII_CFG_MODE_RGMII 0x4
  103. #define GSWIP_MII_CFG_MODE_GMII 0x9
  104. #define GSWIP_MII_CFG_MODE_MASK 0xf
  105. #define GSWIP_MII_CFG_RATE_M2P5 0x00
  106. #define GSWIP_MII_CFG_RATE_M25 0x10
  107. #define GSWIP_MII_CFG_RATE_M125 0x20
  108. #define GSWIP_MII_CFG_RATE_M50 0x30
  109. #define GSWIP_MII_CFG_RATE_AUTO 0x40
  110. #define GSWIP_MII_CFG_RATE_MASK 0x70
  111. #define GSWIP_MII_PCDU0 0x01
  112. #define GSWIP_MII_PCDU1 0x03
  113. #define GSWIP_MII_PCDU5 0x05
  114. #define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
  115. #define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
  116. /* GSWIP Core Registers */
  117. #define GSWIP_SWRES 0x000
  118. #define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
  119. #define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
  120. #define GSWIP_VERSION 0x013
  121. #define GSWIP_VERSION_REV_SHIFT 0
  122. #define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
  123. #define GSWIP_VERSION_MOD_SHIFT 8
  124. #define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
  125. #define GSWIP_VERSION_2_0 0x100
  126. #define GSWIP_VERSION_2_1 0x021
  127. #define GSWIP_VERSION_2_2 0x122
  128. #define GSWIP_VERSION_2_2_ETC 0x022
  129. #define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
  130. #define GSWIP_BM_RAM_ADDR 0x044
  131. #define GSWIP_BM_RAM_CTRL 0x045
  132. #define GSWIP_BM_RAM_CTRL_BAS BIT(15)
  133. #define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
  134. #define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
  135. #define GSWIP_BM_QUEUE_GCTRL 0x04A
  136. #define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
  137. /* buffer management Port Configuration Register */
  138. #define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
  139. #define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
  141. /* buffer management Port Control Register */
  142. #define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
  143. #define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
  144. #define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
  145. /* PCE */
  146. #define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
  147. #define GSWIP_PCE_TBL_MASK 0x448
  148. #define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
  149. #define GSWIP_PCE_TBL_ADDR 0x44E
  150. #define GSWIP_PCE_TBL_CTRL 0x44F
  151. #define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
  152. #define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
  153. #define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
  154. #define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
  155. #define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
  156. #define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
  157. #define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
  158. #define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
  159. #define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
  160. #define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
  161. #define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
  162. #define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
  163. #define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
  164. #define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
  165. #define GSWIP_PCE_GCTRL_0 0x456
  166. #define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
  167. #define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
  168. #define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
  169. #define GSWIP_PCE_GCTRL_1 0x457
  170. #define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
  171. #define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
  172. #define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
  173. #define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
  174. #define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
  175. #define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
  176. #define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
  177. #define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
  178. #define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
  179. #define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
  180. #define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
  181. #define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
  182. #define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
  183. #define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
  184. #define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
  185. #define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
  186. #define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
  187. #define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
  188. #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
  189. #define GSWIP_MAC_FLEN 0x8C5
  190. #define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
  191. #define GSWIP_MAC_CTRL_0_PADEN BIT(8)
  192. #define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
  193. #define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
  194. #define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
  195. #define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
  196. #define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
  197. #define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
  198. #define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
  199. #define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
  200. #define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
  201. #define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
  202. #define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
  203. #define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
  204. #define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
  205. #define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
  206. #define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
  207. #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
  208. #define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
  210. /* Ethernet Switch Fetch DMA Port Control Register */
  211. #define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
  212. #define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
  213. #define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
  214. #define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
  215. #define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
  216. #define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  217. #define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  218. #define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  219. #define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
  220. /* Ethernet Switch Store DMA Port Control Register */
  221. #define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
  222. #define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
  223. #define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
  224. #define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
  225. #define GSWIP_TABLE_ACTIVE_VLAN 0x01
  226. #define GSWIP_TABLE_VLAN_MAPPING 0x02
  227. #define GSWIP_TABLE_MAC_BRIDGE 0x0b
#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static, non-aging entry */
  229. #define XRX200_GPHY_FW_ALIGN (16 * 1024)
  230. /* Maximum packet size supported by the switch. In theory this should be 10240,
  231. * but long packets currently cause lock-ups with an MTU of over 2526. Medium
  232. * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
  233. * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
  234. * packet reception. This is probably caused by the PPA engine, which is on the
  235. * RX part of the device. Packet transmission works properly up to 10240.
  236. */
  237. #define GSWIP_MAX_PACKET_LENGTH 2400
/* Per-SoC description of the switch hardware */
struct gswip_hw_info {
	int max_ports;	/* number of ports, including the CPU port */
	int cpu_port;	/* index of the CPU port */
	const struct dsa_switch_ops *ops;	/* DSA ops for this variant */
};
/* Firmware file names for the internal GPHYs of one SoC family */
struct xway_gphy_match_data {
	char *fe_firmware_name;	/* Fast Ethernet GPHY firmware */
	char *ge_firmware_name;	/* Gigabit Ethernet GPHY firmware */
};
/* State for one internal GPHY firmware instance */
struct gswip_gphy_fw {
	struct clk *clk_gate;		/* clock gate of the GPHY */
	struct reset_control *reset;	/* reset line of the GPHY */
	u32 fw_addr_offset;		/* presumably the firmware load address offset — confirm against the loader */
	char *fw_name;			/* firmware file name to request */
};
/* Shadow of one of the hardware's 64 bridge/VLAN table entries */
struct gswip_vlan {
	struct net_device *bridge;	/* bridge net_device this entry belongs to */
	u16 vid;			/* VLAN ID of the entry */
	u8 fid;				/* filtering/flow ID shared by all entries of one bridge */
};
/* Driver private data, shared by all DSA callbacks */
struct gswip_priv {
	__iomem void *gswip;		/* switch core register block */
	__iomem void *mdio;		/* MDIO controller register block */
	__iomem void *mii;		/* xMII port configuration register block */
	const struct gswip_hw_info *hw_info;
	const struct xway_gphy_match_data *gphy_fw_name_cfg;	/* GPHY firmware names for this SoC */
	struct dsa_switch *ds;
	struct device *dev;
	struct regmap *rcu_regmap;	/* RCU syscon, used for GPHY control */
	struct gswip_vlan vlans[64];	/* shadow of the 64 hardware bridge/VLAN entries */
	int num_gphy_fw;		/* number of entries in gphy_fw[] */
	struct gswip_gphy_fw *gphy_fw;
	u32 port_vlan_filter;		/* presumably a per-port VLAN-filtering bitmap — confirm against users */
	struct mutex pce_table_lock;	/* serializes multi-register PCE table accesses */
};
/* In-memory image of one PCE table row; mirrors the register layout used
 * by gswip_pce_table_entry_read()/_write().
 */
struct gswip_pce_table_entry {
	u16 index;	// PCE_TBL_ADDR.ADDR = pData->table_index
	u16 table;	// PCE_TBL_CTRL.ADDR = pData->table
	u16 key[8];	// GSWIP_PCE_TBL_KEY(0..7) registers
	u16 val[5];	// GSWIP_PCE_TBL_VAL(0..4) registers
	u16 mask;	// GSWIP_PCE_TBL_MASK register
	u8 gmap;	// group map, GSWIP_PCE_TBL_CTRL_GMAP field
	bool type;	// GSWIP_PCE_TBL_CTRL_TYPE bit
	bool valid;	// GSWIP_PCE_TBL_CTRL_VLD bit
	bool key_mode;	// true: key-based (KSRD/KSWR), false: address-based (ADRD/ADWR) access
};
/* Descriptor of one RMON counter: width in 32-bit words, address in the
 * counter RAM, and the string name used for it.
 */
struct gswip_rmon_cnt_desc {
	unsigned int size;	/* 1 = 32-bit counter, 2 = 64-bit over two words */
	unsigned int offset;	/* counter address */
	const char *name;
};
#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}

/* RMON counter table: MIB_DESC(size-in-words, counter-address, name) */
static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
	/** Receive Packet Count (only packets that are accepted and not discarded). */
	MIB_DESC(1, 0x1F, "RxGoodPkts"),
	MIB_DESC(1, 0x23, "RxUnicastPkts"),
	MIB_DESC(1, 0x22, "RxMulticastPkts"),
	MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
	MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
	MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
	MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
	MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
	MIB_DESC(1, 0x20, "RxGoodPausePkts"),
	MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
	MIB_DESC(1, 0x12, "Rx64BytePkts"),
	MIB_DESC(1, 0x13, "Rx127BytePkts"),
	MIB_DESC(1, 0x14, "Rx255BytePkts"),
	MIB_DESC(1, 0x15, "Rx511BytePkts"),
	MIB_DESC(1, 0x16, "Rx1023BytePkts"),
	/** Receive Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x17, "RxMaxBytePkts"),
	MIB_DESC(1, 0x18, "RxDroppedPkts"),
	MIB_DESC(1, 0x19, "RxFilteredPkts"),
	MIB_DESC(2, 0x24, "RxGoodBytes"),
	MIB_DESC(2, 0x26, "RxBadBytes"),
	MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
	MIB_DESC(1, 0x0C, "TxGoodPkts"),
	MIB_DESC(1, 0x06, "TxUnicastPkts"),
	MIB_DESC(1, 0x07, "TxMulticastPkts"),
	MIB_DESC(1, 0x00, "Tx64BytePkts"),
	MIB_DESC(1, 0x01, "Tx127BytePkts"),
	MIB_DESC(1, 0x02, "Tx255BytePkts"),
	MIB_DESC(1, 0x03, "Tx511BytePkts"),
	MIB_DESC(1, 0x04, "Tx1023BytePkts"),
	/** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
	MIB_DESC(1, 0x05, "TxMaxBytePkts"),
	MIB_DESC(1, 0x08, "TxSingleCollCount"),
	MIB_DESC(1, 0x09, "TxMultCollCount"),
	MIB_DESC(1, 0x0A, "TxLateCollCount"),
	MIB_DESC(1, 0x0B, "TxExcessCollCount"),
	MIB_DESC(1, 0x0D, "TxPauseCount"),
	MIB_DESC(1, 0x10, "TxDroppedPkts"),
	MIB_DESC(2, 0x0E, "TxGoodBytes"),
};
  332. static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
  333. {
  334. return __raw_readl(priv->gswip + (offset * 4));
  335. }
  336. static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
  337. {
  338. __raw_writel(val, priv->gswip + (offset * 4));
  339. }
  340. static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
  341. u32 offset)
  342. {
  343. u32 val = gswip_switch_r(priv, offset);
  344. val &= ~(clear);
  345. val |= set;
  346. gswip_switch_w(priv, val, offset);
  347. }
/* Poll a switch core register (20us interval, 50ms timeout) until all
 * bits in @cleared read back as zero. Returns 0 on success or the
 * negative error code from readx_poll_timeout() on timeout.
 */
static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
				  u32 cleared)
{
	u32 val;

	return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
				  (val & cleared) == 0, 20, 50000);
}
  355. static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
  356. {
  357. return __raw_readl(priv->mdio + (offset * 4));
  358. }
  359. static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
  360. {
  361. __raw_writel(val, priv->mdio + (offset * 4));
  362. }
  363. static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
  364. u32 offset)
  365. {
  366. u32 val = gswip_mdio_r(priv, offset);
  367. val &= ~(clear);
  368. val |= set;
  369. gswip_mdio_w(priv, val, offset);
  370. }
  371. static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
  372. {
  373. return __raw_readl(priv->mii + (offset * 4));
  374. }
  375. static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
  376. {
  377. __raw_writel(val, priv->mii + (offset * 4));
  378. }
  379. static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
  380. u32 offset)
  381. {
  382. u32 val = gswip_mii_r(priv, offset);
  383. val &= ~(clear);
  384. val |= set;
  385. gswip_mii_w(priv, val, offset);
  386. }
  387. static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
  388. int port)
  389. {
  390. /* There's no MII_CFG register for the CPU port */
  391. if (!dsa_is_cpu_port(priv->ds, port))
  392. gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
  393. }
  394. static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
  395. int port)
  396. {
  397. switch (port) {
  398. case 0:
  399. gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
  400. break;
  401. case 1:
  402. gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
  403. break;
  404. case 5:
  405. gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
  406. break;
  407. }
  408. }
  409. static int gswip_mdio_poll(struct gswip_priv *priv)
  410. {
  411. int cnt = 100;
  412. while (likely(cnt--)) {
  413. u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
  414. if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
  415. return 0;
  416. usleep_range(20, 40);
  417. }
  418. return -ETIMEDOUT;
  419. }
/* mii_bus write callback: write @val to register @reg of the PHY at
 * @addr through the switch-internal MDIO master. Returns 0 on success
 * or -ETIMEDOUT if the bus stays busy.
 */
static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	/* Wait for any previous transaction to finish */
	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
	/* Start the write; the BUSY bit clears when the transfer is done */
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	return 0;
}
/* mii_bus read callback: read register @reg of the PHY at @addr through
 * the switch-internal MDIO master. Returns the (non-negative) register
 * value on success or -ETIMEDOUT if the bus stays busy.
 */
static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
{
	struct gswip_priv *priv = bus->priv;
	int err;

	/* Wait for any previous transaction to finish */
	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	/* Start the read */
	gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
		((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
		(reg & GSWIP_MDIO_CTRL_REGAD_MASK),
		GSWIP_MDIO_CTRL);

	/* Wait for the transfer to complete before fetching the result */
	err = gswip_mdio_poll(priv);
	if (err) {
		dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
		return err;
	}

	return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}
/* Allocate and register the MDIO bus used to reach the PHYs behind the
 * switch, described by the device tree node @mdio_np. On registration
 * failure the allocated bus is freed again. Returns 0 on success or a
 * negative error code.
 */
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
	struct dsa_switch *ds = priv->ds;
	int err;

	ds->slave_mii_bus = mdiobus_alloc();
	if (!ds->slave_mii_bus)
		return -ENOMEM;

	ds->slave_mii_bus->priv = priv;
	ds->slave_mii_bus->read = gswip_mdio_rd;
	ds->slave_mii_bus->write = gswip_mdio_wr;
	ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(priv->dev));
	ds->slave_mii_bus->parent = priv->dev;
	/* Only probe the PHY addresses DSA knows about */
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;

	err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
	if (err)
		mdiobus_free(ds->slave_mii_bus);

	return err;
}
  476. static int gswip_pce_table_entry_read(struct gswip_priv *priv,
  477. struct gswip_pce_table_entry *tbl)
  478. {
  479. int i;
  480. int err;
  481. u16 crtl;
  482. u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
  483. GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
  484. mutex_lock(&priv->pce_table_lock);
  485. err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
  486. GSWIP_PCE_TBL_CTRL_BAS);
  487. if (err) {
  488. mutex_unlock(&priv->pce_table_lock);
  489. return err;
  490. }
  491. gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
  492. gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
  493. GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
  494. tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
  495. GSWIP_PCE_TBL_CTRL);
  496. err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
  497. GSWIP_PCE_TBL_CTRL_BAS);
  498. if (err) {
  499. mutex_unlock(&priv->pce_table_lock);
  500. return err;
  501. }
  502. for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
  503. tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
  504. for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
  505. tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
  506. tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
  507. crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
  508. tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
  509. tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
  510. tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
  511. mutex_unlock(&priv->pce_table_lock);
  512. return 0;
  513. }
  514. static int gswip_pce_table_entry_write(struct gswip_priv *priv,
  515. struct gswip_pce_table_entry *tbl)
  516. {
  517. int i;
  518. int err;
  519. u16 crtl;
  520. u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
  521. GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
  522. mutex_lock(&priv->pce_table_lock);
  523. err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
  524. GSWIP_PCE_TBL_CTRL_BAS);
  525. if (err) {
  526. mutex_unlock(&priv->pce_table_lock);
  527. return err;
  528. }
  529. gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
  530. gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
  531. GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
  532. tbl->table | addr_mode,
  533. GSWIP_PCE_TBL_CTRL);
  534. for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
  535. gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
  536. for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
  537. gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
  538. gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
  539. GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
  540. tbl->table | addr_mode,
  541. GSWIP_PCE_TBL_CTRL);
  542. gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
  543. crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
  544. crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
  545. GSWIP_PCE_TBL_CTRL_GMAP_MASK);
  546. if (tbl->type)
  547. crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
  548. if (tbl->valid)
  549. crtl |= GSWIP_PCE_TBL_CTRL_VLD;
  550. crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
  551. crtl |= GSWIP_PCE_TBL_CTRL_BAS;
  552. gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
  553. err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
  554. GSWIP_PCE_TBL_CTRL_BAS);
  555. mutex_unlock(&priv->pce_table_lock);
  556. return err;
  557. }
/* Add the LAN port into a bridge with the CPU port by
 * default. This prevents automatic forwarding of
 * packets between the LAN ports when no explicit
 * bridge is configured.
 */
static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
	struct gswip_pce_table_entry vlan_active = {0,};
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int cpu_port = priv->hw_info->cpu_port;
	unsigned int max_ports = priv->hw_info->max_ports;
	int err;

	if (port >= max_ports) {
		dev_err(priv->dev, "single port for %i supported\n", port);
		return -EIO;
	}

	/* Per-port private entry at index port + 1, VID 0, with a
	 * filtering ID unique to this port.
	 */
	vlan_active.index = port + 1;
	vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
	vlan_active.key[0] = 0; /* vid */
	vlan_active.val[0] = port + 1 /* fid */;
	vlan_active.valid = add;
	err = gswip_pce_table_entry_write(priv, &vlan_active);
	if (err) {
		dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
		return err;
	}

	/* When removing, invalidating the active VLAN entry is enough */
	if (!add)
		return 0;

	/* Member ports of this single-port bridge: the port itself and
	 * the CPU port only.
	 */
	vlan_mapping.index = port + 1;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	vlan_mapping.val[0] = 0 /* vid */;
	vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
	vlan_mapping.val[2] = 0;
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	return 0;
}
/* dsa_switch_ops .port_enable: bring up a user port.
 *
 * Puts the port into its single-port bridge (so it does not forward to
 * other LAN ports by default), enables the RMON counters and the
 * fetch/store DMA engines, and stores the PHY address in the port's
 * MDIO PHY register.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_port_enable(struct dsa_switch *ds, int port,
			     struct phy_device *phydev)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	/* NOTE(review): gswip_setup() calls this with the CPU port, which
	 * is not a user port, so that call returns here without enabling
	 * the CPU port's DMA engines or counters — confirm this is the
	 * intended behavior.
	 */
	if (!dsa_is_user_port(ds, port))
		return 0;

	if (!dsa_is_cpu_port(ds, port)) {
		err = gswip_add_single_port_br(priv, port, true);
		if (err)
			return err;
	}

	/* RMON Counter Enable for port */
	gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));

	/* enable port fetch/store dma & VLAN Modification */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
			  GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
			  GSWIP_FDMA_PCTRLp(port));
	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));

	if (!dsa_is_cpu_port(ds, port)) {
		u32 mdio_phy = 0;

		if (phydev)
			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;

		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
				GSWIP_MDIO_PHYp(port));
	}

	return 0;
}
  627. static void gswip_port_disable(struct dsa_switch *ds, int port)
  628. {
  629. struct gswip_priv *priv = ds->priv;
  630. if (!dsa_is_user_port(ds, port))
  631. return;
  632. gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
  633. GSWIP_FDMA_PCTRLp(port));
  634. gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
  635. GSWIP_SDMA_PCTRLp(port));
  636. }
/* Load the PCE (Packet Classification Engine) microcode into the switch.
 *
 * Each microcode row is written through the PCE table access registers
 * in address-based write mode; afterwards the switch is told that a
 * valid microcode is present.
 *
 * Returns 0 on success or a negative error code when a table access
 * does not complete in time.
 */
static int gswip_pce_load_microcode(struct gswip_priv *priv)
{
	int i;
	int err;

	/* Select address-based write mode for the table access. */
	gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
			  GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
			  GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
	gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);

	for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
		gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
		gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
			       GSWIP_PCE_TBL_VAL(0));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
			       GSWIP_PCE_TBL_VAL(1));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
			       GSWIP_PCE_TBL_VAL(2));
		gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
			       GSWIP_PCE_TBL_VAL(3));

		/* start the table access: */
		gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
				  GSWIP_PCE_TBL_CTRL);
		/* Wait for the busy (BAS) bit to clear before the next row. */
		err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
					     GSWIP_PCE_TBL_CTRL_BAS);
		if (err)
			return err;
	}

	/* tell the switch that the microcode is loaded */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
			  GSWIP_PCE_GCTRL_0);

	return 0;
}
/* dsa_switch_ops .port_vlan_filtering: switch a port between VLAN-aware
 * and VLAN-unaware operation.
 *
 * Toggling while the port is bridged is rejected, because the VLAN
 * table layout already programmed for the bridge depends on the mode.
 *
 * Returns 0 on success or -EIO when dynamic toggling is rejected.
 */
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
				     bool vlan_filtering,
				     struct netlink_ext_ack *extack)
{
	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
	struct gswip_priv *priv = ds->priv;

	/* Do not allow changing the VLAN filtering options while in bridge */
	if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Dynamic toggling of vlan_filtering not supported");
		return -EIO;
	}

	if (vlan_filtering) {
		/* Use tag based VLAN */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
				  GSWIP_PCE_PCTRL_0p(port));
	} else {
		/* Use port based VLAN tag */
		gswip_switch_mask(priv,
				  GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
				  GSWIP_PCE_VCTRL_VEMR,
				  GSWIP_PCE_VCTRL_VSR,
				  GSWIP_PCE_VCTRL(port));
		gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
				  GSWIP_PCE_PCTRL_0p(port));
	}

	return 0;
}
/* dsa_switch_ops .setup: one-time initialization of the switch.
 *
 * Resets the hardware, loads the PCE microcode, directs all unknown
 * broadcast/multicast/unicast traffic to the CPU port, disables the
 * problematic MDIO PHY auto polling, configures the MDIO clock,
 * enables special tagging on the CPU port and flushes the MAC table.
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_setup(struct dsa_switch *ds)
{
	struct gswip_priv *priv = ds->priv;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int i;
	int err;

	/* Pulse the global software reset. */
	gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
	usleep_range(5000, 10000);
	gswip_switch_w(priv, 0, GSWIP_SWRES);

	/* disable port fetch/store dma on all ports */
	for (i = 0; i < priv->hw_info->max_ports; i++) {
		gswip_port_disable(ds, i);
		gswip_port_vlan_filtering(ds, i, false, NULL);
	}

	/* enable Switch */
	gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);

	err = gswip_pce_load_microcode(priv);
	if (err) {
		dev_err(priv->dev, "writing PCE microcode failed, %i", err);
		return err;
	}

	/* Default unknown Broadcast/Multicast/Unicast port maps */
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);

	/* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
	 * interoperability problem with this auto polling mechanism because
	 * their status registers think that the link is in a different state
	 * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
	 * auto polling state machine consider the link being negotiated with
	 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
	 * to the switch port being completely dead (RX and TX are both not
	 * working).
	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
	 * it would work fine for a few minutes to hours and then stop; on
	 * other devices no traffic could be sent or received at all.
	 * Testing shows that when PHY auto polling is disabled these problems
	 * go away.
	 */
	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);

	/* Configure the MDIO Clock 2.5 MHz */
	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);

	/* Disable the xMII interface and clear its isolation bit */
	for (i = 0; i < priv->hw_info->max_ports; i++)
		gswip_mii_mask_cfg(priv,
				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
				   0, i);

	/* enable special tag insertion on cpu port */
	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
			  GSWIP_FDMA_PCTRLp(cpu_port));

	/* accept special tag in ingress direction */
	gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
			  GSWIP_PCE_PCTRL_0p(cpu_port));

	gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
			  GSWIP_BM_QUEUE_GCTRL);

	/* VLAN aware Switching */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);

	/* Flush MAC Table */
	gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
	err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
				     GSWIP_PCE_GCTRL_0_MTFL);
	if (err) {
		dev_err(priv->dev, "MAC flushing didn't finish\n");
		return err;
	}

	ds->mtu_enforcement_ingress = true;

	/* NOTE(review): cpu_port is not a user port, so this call returns
	 * early inside gswip_port_enable() — verify the CPU port's DMA
	 * engines and counters actually get enabled as intended.
	 */
	gswip_port_enable(ds, cpu_port, NULL);

	ds->configure_vlan_while_not_filtering = false;

	return 0;
}
  773. static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
  774. int port,
  775. enum dsa_tag_protocol mp)
  776. {
  777. return DSA_TAG_PROTO_GSWIP;
  778. }
  779. static int gswip_vlan_active_create(struct gswip_priv *priv,
  780. struct net_device *bridge,
  781. int fid, u16 vid)
  782. {
  783. struct gswip_pce_table_entry vlan_active = {0,};
  784. unsigned int max_ports = priv->hw_info->max_ports;
  785. int idx = -1;
  786. int err;
  787. int i;
  788. /* Look for a free slot */
  789. for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
  790. if (!priv->vlans[i].bridge) {
  791. idx = i;
  792. break;
  793. }
  794. }
  795. if (idx == -1)
  796. return -ENOSPC;
  797. if (fid == -1)
  798. fid = idx;
  799. vlan_active.index = idx;
  800. vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
  801. vlan_active.key[0] = vid;
  802. vlan_active.val[0] = fid;
  803. vlan_active.valid = true;
  804. err = gswip_pce_table_entry_write(priv, &vlan_active);
  805. if (err) {
  806. dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
  807. return err;
  808. }
  809. priv->vlans[idx].bridge = bridge;
  810. priv->vlans[idx].vid = vid;
  811. priv->vlans[idx].fid = fid;
  812. return idx;
  813. }
  814. static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
  815. {
  816. struct gswip_pce_table_entry vlan_active = {0,};
  817. int err;
  818. vlan_active.index = idx;
  819. vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
  820. vlan_active.valid = false;
  821. err = gswip_pce_table_entry_write(priv, &vlan_active);
  822. if (err)
  823. dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
  824. priv->vlans[idx].bridge = NULL;
  825. return err;
  826. }
/* Join @port to the VLAN-unaware bridge @bridge.
 *
 * All member ports of a VLAN-unaware bridge share one VLAN mapping
 * entry (VID 0), found through the bridge pointer in the software
 * shadow table. The entry is created on first use and afterwards only
 * extended with the new port (plus the CPU port).
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_vlan_add_unaware(struct gswip_priv *priv,
				  struct net_device *bridge, int port)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			idx = i;
			break;
		}
	}

	/* If this bridge is not programmed yet, add an Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = 0;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* Update the VLAN mapping entry and write it to the switch */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	/* VLAN-unaware bridging: clear the port's default VID. */
	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}
/* Join @port to VLAN @vid of the VLAN-aware bridge @bridge.
 *
 * Each (bridge, vid) pair occupies one Active VLAN / VLAN mapping slot;
 * all VLANs of one bridge share the same FID so they also share one
 * forwarding database.
 *
 * @untagged: send frames of this VLAN untagged on @port
 * @pvid: make this VLAN the port's default (ingress) VLAN
 *
 * Returns 0 on success or a negative error code.
 */
static int gswip_vlan_add_aware(struct gswip_priv *priv,
				struct net_device *bridge, int port,
				u16 vid, bool untagged,
				bool pvid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int fid = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			if (fid != -1 && fid != priv->vlans[i].fid)
				dev_err(priv->dev, "one bridge with multiple flow ids\n");
			fid = priv->vlans[i].fid;
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add an Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = vid;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	vlan_mapping.val[0] = vid;

	/* Update the VLAN mapping entry and write it to the switch.
	 * val[1] is the member port map, val[2] the tagged-egress port
	 * map; the CPU port is always a tagged member.
	 */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[2] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);

	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	/* The default-VID register is programmed with the table index. */
	if (pvid)
		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));

	return 0;
}
/* Remove @port from a bridge VLAN entry.
 *
 * With @vlan_aware set the slot matching both @bridge and @vid is used;
 * otherwise the first slot of @bridge matches and @vid is ignored. When
 * no user port is left in the member map, the Active VLAN slot is
 * released. With @pvid set the port's default VID register is cleared.
 *
 * Returns 0 on success, -ENOENT when no matching entry exists, or a
 * negative error code from the hardware access.
 */
static int gswip_vlan_remove(struct gswip_priv *priv,
			     struct net_device *bridge, int port,
			     u16 vid, bool pvid, bool vlan_aware)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    (!vlan_aware || priv->vlans[i].vid == vid)) {
			idx = i;
			break;
		}
	}

	if (idx == -1) {
		dev_err(priv->dev, "bridge to leave does not exists\n");
		return -ENOENT;
	}

	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
		return err;
	}

	/* Drop the port from the member (val[1]) and tagged (val[2]) maps. */
	vlan_mapping.val[1] &= ~BIT(port);
	vlan_mapping.val[2] &= ~BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	/* In case all ports are removed from the bridge, remove the VLAN */
	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
		err = gswip_vlan_active_remove(priv, idx);
		if (err) {
			dev_err(priv->dev, "failed to write active VLAN: %d\n",
				err);
			return err;
		}
	}

	/* GSWIP 2.2 (GRX300) and later program here the VID directly. */
	if (pvid)
		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}
  1000. static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
  1001. struct dsa_bridge bridge,
  1002. bool *tx_fwd_offload,
  1003. struct netlink_ext_ack *extack)
  1004. {
  1005. struct net_device *br = bridge.dev;
  1006. struct gswip_priv *priv = ds->priv;
  1007. int err;
  1008. /* When the bridge uses VLAN filtering we have to configure VLAN
  1009. * specific bridges. No bridge is configured here.
  1010. */
  1011. if (!br_vlan_enabled(br)) {
  1012. err = gswip_vlan_add_unaware(priv, br, port);
  1013. if (err)
  1014. return err;
  1015. priv->port_vlan_filter &= ~BIT(port);
  1016. } else {
  1017. priv->port_vlan_filter |= BIT(port);
  1018. }
  1019. return gswip_add_single_port_br(priv, port, false);
  1020. }
  1021. static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
  1022. struct dsa_bridge bridge)
  1023. {
  1024. struct net_device *br = bridge.dev;
  1025. struct gswip_priv *priv = ds->priv;
  1026. gswip_add_single_port_br(priv, port, true);
  1027. /* When the bridge uses VLAN filtering we have to configure VLAN
  1028. * specific bridges. No bridge is configured here.
  1029. */
  1030. if (!br_vlan_enabled(br))
  1031. gswip_vlan_remove(priv, br, port, 0, true, false);
  1032. }
  1033. static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
  1034. const struct switchdev_obj_port_vlan *vlan,
  1035. struct netlink_ext_ack *extack)
  1036. {
  1037. struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
  1038. struct gswip_priv *priv = ds->priv;
  1039. unsigned int max_ports = priv->hw_info->max_ports;
  1040. int pos = max_ports;
  1041. int i, idx = -1;
  1042. /* We only support VLAN filtering on bridges */
  1043. if (!dsa_is_cpu_port(ds, port) && !bridge)
  1044. return -EOPNOTSUPP;
  1045. /* Check if there is already a page for this VLAN */
  1046. for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
  1047. if (priv->vlans[i].bridge == bridge &&
  1048. priv->vlans[i].vid == vlan->vid) {
  1049. idx = i;
  1050. break;
  1051. }
  1052. }
  1053. /* If this VLAN is not programmed yet, we have to reserve
  1054. * one entry in the VLAN table. Make sure we start at the
  1055. * next position round.
  1056. */
  1057. if (idx == -1) {
  1058. /* Look for a free slot */
  1059. for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
  1060. if (!priv->vlans[pos].bridge) {
  1061. idx = pos;
  1062. pos++;
  1063. break;
  1064. }
  1065. }
  1066. if (idx == -1) {
  1067. NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table");
  1068. return -ENOSPC;
  1069. }
  1070. }
  1071. return 0;
  1072. }
  1073. static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
  1074. const struct switchdev_obj_port_vlan *vlan,
  1075. struct netlink_ext_ack *extack)
  1076. {
  1077. struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
  1078. struct gswip_priv *priv = ds->priv;
  1079. bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
  1080. bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
  1081. int err;
  1082. err = gswip_port_vlan_prepare(ds, port, vlan, extack);
  1083. if (err)
  1084. return err;
  1085. /* We have to receive all packets on the CPU port and should not
  1086. * do any VLAN filtering here. This is also called with bridge
  1087. * NULL and then we do not know for which bridge to configure
  1088. * this.
  1089. */
  1090. if (dsa_is_cpu_port(ds, port))
  1091. return 0;
  1092. return gswip_vlan_add_aware(priv, bridge, port, vlan->vid,
  1093. untagged, pvid);
  1094. }
  1095. static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
  1096. const struct switchdev_obj_port_vlan *vlan)
  1097. {
  1098. struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
  1099. struct gswip_priv *priv = ds->priv;
  1100. bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
  1101. /* We have to receive all packets on the CPU port and should not
  1102. * do any VLAN filtering here. This is also called with bridge
  1103. * NULL and then we do not know for which bridge to configure
  1104. * this.
  1105. */
  1106. if (dsa_is_cpu_port(ds, port))
  1107. return 0;
  1108. return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true);
  1109. }
/* dsa_switch_ops .port_fast_age: flush dynamically learned FDB entries.
 *
 * Walks all 2048 MAC bridge table entries and invalidates every valid,
 * non-static entry whose port field (bits 7:4 of val[0]) matches @port.
 * Errors are logged and abort the walk.
 */
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		/* NOTE(review): mac_bridge is reused across iterations;
		 * this assumes gswip_pce_table_entry_read() refreshes all
		 * fields inspected below — confirm.
		 */
		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return;
		}

		if (!mac_bridge.valid)
			continue;

		/* Keep static (user-installed) entries. */
		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
			continue;

		if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
			continue;

		mac_bridge.valid = false;
		err = gswip_pce_table_entry_write(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to write mac bridge: %d\n",
				err);
			return;
		}
	}
}
/* dsa_switch_ops .port_stp_state_set: apply the bridge STP state.
 *
 * BR_STATE_DISABLED stops the store (RX) DMA so no frames are accepted
 * anymore; the remaining states re-enable reception and program the
 * matching PCE port state (listen/learning/forwarding).
 */
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct gswip_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
				  GSWIP_SDMA_PCTRLp(port));
		return;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
		break;
	case BR_STATE_LEARNING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
		break;
	default:
		dev_err(priv->dev, "invalid STP state: %d\n", state);
		return;
	}

	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
			  GSWIP_PCE_PCTRL_0p(port));
}
/* Add or remove a static MAC bridge (FDB) entry for @addr on @port.
 *
 * The hardware entry is keyed by the MAC address plus the FID of the
 * bridge the port is a member of; @vid is not part of the key (all
 * VLANs of one bridge share a single FID).
 *
 * Returns 0 on success, -EINVAL when the port is not bridged, or a
 * negative error code from the table write.
 */
static int gswip_port_fdb(struct dsa_switch *ds, int port,
			  const unsigned char *addr, u16 vid, bool add)
{
	struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	int fid = -1;
	int i;
	int err;

	if (!bridge)
		return -EINVAL;

	/* Look up the FID assigned to this bridge. */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			fid = priv->vlans[i].fid;
			break;
		}
	}

	if (fid == -1) {
		dev_err(priv->dev, "Port not part of a bridge\n");
		return -EINVAL;
	}

	/* Key: MAC address split into three 16 bit words plus the FID. */
	mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
	mac_bridge.key_mode = true;
	mac_bridge.key[0] = addr[5] | (addr[4] << 8);
	mac_bridge.key[1] = addr[3] | (addr[2] << 8);
	mac_bridge.key[2] = addr[1] | (addr[0] << 8);
	mac_bridge.key[3] = fid;
	mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
	mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
	mac_bridge.valid = add;

	err = gswip_pce_table_entry_write(priv, &mac_bridge);
	if (err)
		dev_err(priv->dev, "failed to write mac bridge: %d\n", err);

	return err;
}
  1204. static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
  1205. const unsigned char *addr, u16 vid,
  1206. struct dsa_db db)
  1207. {
  1208. return gswip_port_fdb(ds, port, addr, vid, true);
  1209. }
  1210. static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
  1211. const unsigned char *addr, u16 vid,
  1212. struct dsa_db db)
  1213. {
  1214. return gswip_port_fdb(ds, port, addr, vid, false);
  1215. }
/* dsa_switch_ops .port_fdb_dump: report the FDB entries of @port.
 *
 * Iterates all 2048 MAC bridge table entries. Static entries carry a
 * port bitmap in val[0]; dynamic entries store the source port number
 * in bits 7:4 of val[0]. All entries are reported to @cb with VID 0.
 *
 * Returns 0 on success or the first error from the hardware or @cb.
 */
static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
			       dsa_fdb_dump_cb_t *cb, void *data)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	unsigned char addr[6];
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev,
				"failed to read mac bridge entry %d: %d\n",
				i, err);
			return err;
		}

		if (!mac_bridge.valid)
			continue;

		/* Reassemble the MAC address from the 16 bit key words. */
		addr[5] = mac_bridge.key[0] & 0xff;
		addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
		addr[3] = mac_bridge.key[1] & 0xff;
		addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
		addr[1] = mac_bridge.key[2] & 0xff;
		addr[0] = (mac_bridge.key[2] >> 8) & 0xff;

		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
			if (mac_bridge.val[0] & BIT(port)) {
				err = cb(addr, 0, true, data);
				if (err)
					return err;
			}
		} else {
			if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
				err = cb(addr, 0, false, data);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
  1258. static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
  1259. {
  1260. /* Includes 8 bytes for special header. */
  1261. return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
  1262. }
/* dsa_switch_ops .port_change_mtu: apply @new_mtu on @port.
 *
 * The switch has a single, global maximum frame length register
 * (GSWIP_MAC_FLEN); it is only updated for the CPU port, which carries
 * the largest MTU of all user ports. Ports with a non-standard MTU
 * additionally get the per-port MLEN override enabled.
 *
 * Returns 0 (the register writes cannot fail).
 */
static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	struct gswip_priv *priv = ds->priv;
	int cpu_port = priv->hw_info->cpu_port;

	/* CPU port always has maximum mtu of user ports, so use it to set
	 * switch frame size, including 8 byte special header.
	 */
	if (port == cpu_port) {
		new_mtu += 8;
		gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
			       GSWIP_MAC_FLEN);
	}

	/* Enable MLEN for ports with non-standard MTUs, including the special
	 * header on the CPU port added above.
	 */
	if (new_mtu != ETH_DATA_LEN)
		gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
				  GSWIP_MAC_CTRL_2p(port));
	else
		gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
				  GSWIP_MAC_CTRL_2p(port));

	return 0;
}
  1286. static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
  1287. struct phylink_config *config)
  1288. {
  1289. switch (port) {
  1290. case 0:
  1291. case 1:
  1292. phy_interface_set_rgmii(config->supported_interfaces);
  1293. __set_bit(PHY_INTERFACE_MODE_MII,
  1294. config->supported_interfaces);
  1295. __set_bit(PHY_INTERFACE_MODE_REVMII,
  1296. config->supported_interfaces);
  1297. __set_bit(PHY_INTERFACE_MODE_RMII,
  1298. config->supported_interfaces);
  1299. break;
  1300. case 2:
  1301. case 3:
  1302. case 4:
  1303. __set_bit(PHY_INTERFACE_MODE_INTERNAL,
  1304. config->supported_interfaces);
  1305. break;
  1306. case 5:
  1307. phy_interface_set_rgmii(config->supported_interfaces);
  1308. __set_bit(PHY_INTERFACE_MODE_INTERNAL,
  1309. config->supported_interfaces);
  1310. break;
  1311. }
  1312. config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
  1313. MAC_10 | MAC_100 | MAC_1000;
  1314. }
  1315. static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
  1316. struct phylink_config *config)
  1317. {
  1318. switch (port) {
  1319. case 0:
  1320. phy_interface_set_rgmii(config->supported_interfaces);
  1321. __set_bit(PHY_INTERFACE_MODE_GMII,
  1322. config->supported_interfaces);
  1323. __set_bit(PHY_INTERFACE_MODE_RMII,
  1324. config->supported_interfaces);
  1325. break;
  1326. case 1:
  1327. case 2:
  1328. case 3:
  1329. case 4:
  1330. __set_bit(PHY_INTERFACE_MODE_INTERNAL,
  1331. config->supported_interfaces);
  1332. break;
  1333. case 5:
  1334. phy_interface_set_rgmii(config->supported_interfaces);
  1335. __set_bit(PHY_INTERFACE_MODE_INTERNAL,
  1336. config->supported_interfaces);
  1337. __set_bit(PHY_INTERFACE_MODE_RMII,
  1338. config->supported_interfaces);
  1339. break;
  1340. }
  1341. config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
  1342. MAC_10 | MAC_100 | MAC_1000;
  1343. }
  1344. static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
  1345. {
  1346. u32 mdio_phy;
  1347. if (link)
  1348. mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
  1349. else
  1350. mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
  1351. gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
  1352. GSWIP_MDIO_PHYp(port));
  1353. }
/* Program the speed-dependent MAC and xMII settings of @port.
 *
 * Writes the MDIO PHY speed bits, selects the xMII clock rate (50 MHz
 * for RMII, otherwise 2.5/25/125 MHz depending on speed) and switches
 * the MAC between (G)MII and RGMII framing. Speeds other than
 * 10/100/1000 leave the local defaults of 0 in place.
 */
static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
				 phy_interface_t interface)
{
	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;

	switch (speed) {
	case SPEED_10:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;
	case SPEED_100:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M25;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;
	case SPEED_1000:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
		mii_cfg = GSWIP_MII_CFG_RATE_M125;
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
		break;
	}

	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
			  GSWIP_MAC_CTRL_0p(port));
}
  1387. static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
  1388. {
  1389. u32 mac_ctrl_0, mdio_phy;
  1390. if (duplex == DUPLEX_FULL) {
  1391. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
  1392. mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
  1393. } else {
  1394. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
  1395. mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
  1396. }
  1397. gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
  1398. GSWIP_MAC_CTRL_0p(port));
  1399. gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
  1400. GSWIP_MDIO_PHYp(port));
  1401. }
  1402. static void gswip_port_set_pause(struct gswip_priv *priv, int port,
  1403. bool tx_pause, bool rx_pause)
  1404. {
  1405. u32 mac_ctrl_0, mdio_phy;
  1406. if (tx_pause && rx_pause) {
  1407. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
  1408. mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
  1409. GSWIP_MDIO_PHY_FCONRX_EN;
  1410. } else if (tx_pause) {
  1411. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
  1412. mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
  1413. GSWIP_MDIO_PHY_FCONRX_DIS;
  1414. } else if (rx_pause) {
  1415. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
  1416. mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
  1417. GSWIP_MDIO_PHY_FCONRX_EN;
  1418. } else {
  1419. mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
  1420. mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
  1421. GSWIP_MDIO_PHY_FCONRX_DIS;
  1422. }
  1423. gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
  1424. mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
  1425. gswip_mdio_mask(priv,
  1426. GSWIP_MDIO_PHY_FCONTX_MASK |
  1427. GSWIP_MDIO_PHY_FCONRX_MASK,
  1428. mdio_phy, GSWIP_MDIO_PHYp(port));
  1429. }
  1430. static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
  1431. unsigned int mode,
  1432. const struct phylink_link_state *state)
  1433. {
  1434. struct gswip_priv *priv = ds->priv;
  1435. u32 miicfg = 0;
  1436. miicfg |= GSWIP_MII_CFG_LDCLKDIS;
  1437. switch (state->interface) {
  1438. case PHY_INTERFACE_MODE_MII:
  1439. case PHY_INTERFACE_MODE_INTERNAL:
  1440. miicfg |= GSWIP_MII_CFG_MODE_MIIM;
  1441. break;
  1442. case PHY_INTERFACE_MODE_REVMII:
  1443. miicfg |= GSWIP_MII_CFG_MODE_MIIP;
  1444. break;
  1445. case PHY_INTERFACE_MODE_RMII:
  1446. miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
  1447. break;
  1448. case PHY_INTERFACE_MODE_RGMII:
  1449. case PHY_INTERFACE_MODE_RGMII_ID:
  1450. case PHY_INTERFACE_MODE_RGMII_RXID:
  1451. case PHY_INTERFACE_MODE_RGMII_TXID:
  1452. miicfg |= GSWIP_MII_CFG_MODE_RGMII;
  1453. break;
  1454. case PHY_INTERFACE_MODE_GMII:
  1455. miicfg |= GSWIP_MII_CFG_MODE_GMII;
  1456. break;
  1457. default:
  1458. dev_err(ds->dev,
  1459. "Unsupported interface: %d\n", state->interface);
  1460. return;
  1461. }
  1462. gswip_mii_mask_cfg(priv,
  1463. GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
  1464. GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
  1465. miicfg, port);
  1466. switch (state->interface) {
  1467. case PHY_INTERFACE_MODE_RGMII_ID:
  1468. gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
  1469. GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
  1470. break;
  1471. case PHY_INTERFACE_MODE_RGMII_RXID:
  1472. gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
  1473. break;
  1474. case PHY_INTERFACE_MODE_RGMII_TXID:
  1475. gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
  1476. break;
  1477. default:
  1478. break;
  1479. }
  1480. }
  1481. static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
  1482. unsigned int mode,
  1483. phy_interface_t interface)
  1484. {
  1485. struct gswip_priv *priv = ds->priv;
  1486. gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
  1487. if (!dsa_is_cpu_port(ds, port))
  1488. gswip_port_set_link(priv, port, false);
  1489. }
  1490. static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
  1491. unsigned int mode,
  1492. phy_interface_t interface,
  1493. struct phy_device *phydev,
  1494. int speed, int duplex,
  1495. bool tx_pause, bool rx_pause)
  1496. {
  1497. struct gswip_priv *priv = ds->priv;
  1498. if (!dsa_is_cpu_port(ds, port)) {
  1499. gswip_port_set_link(priv, port, true);
  1500. gswip_port_set_speed(priv, port, speed, interface);
  1501. gswip_port_set_duplex(priv, port, duplex);
  1502. gswip_port_set_pause(priv, port, tx_pause, rx_pause);
  1503. }
  1504. gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
  1505. }
  1506. static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
  1507. uint8_t *data)
  1508. {
  1509. int i;
  1510. if (stringset != ETH_SS_STATS)
  1511. return;
  1512. for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
  1513. strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
  1514. ETH_GSTRING_LEN);
  1515. }
  1516. static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
  1517. u32 index)
  1518. {
  1519. u32 result;
  1520. int err;
  1521. gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
  1522. gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
  1523. GSWIP_BM_RAM_CTRL_OPMOD,
  1524. table | GSWIP_BM_RAM_CTRL_BAS,
  1525. GSWIP_BM_RAM_CTRL);
  1526. err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
  1527. GSWIP_BM_RAM_CTRL_BAS);
  1528. if (err) {
  1529. dev_err(priv->dev, "timeout while reading table: %u, index: %u",
  1530. table, index);
  1531. return 0;
  1532. }
  1533. result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
  1534. result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
  1535. return result;
  1536. }
  1537. static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
  1538. uint64_t *data)
  1539. {
  1540. struct gswip_priv *priv = ds->priv;
  1541. const struct gswip_rmon_cnt_desc *rmon_cnt;
  1542. int i;
  1543. u64 high;
  1544. for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
  1545. rmon_cnt = &gswip_rmon_cnt[i];
  1546. data[i] = gswip_bcm_ram_entry_read(priv, port,
  1547. rmon_cnt->offset);
  1548. if (rmon_cnt->size == 2) {
  1549. high = gswip_bcm_ram_entry_read(priv, port,
  1550. rmon_cnt->offset + 1);
  1551. data[i] |= high << 32;
  1552. }
  1553. }
  1554. }
  1555. static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
  1556. {
  1557. if (sset != ETH_SS_STATS)
  1558. return 0;
  1559. return ARRAY_SIZE(gswip_rmon_cnt);
  1560. }
/* DSA callbacks for the xrx200 family (GSWIP 2.0/2.1). Identical to
 * the xrx300 table below except for the phylink_get_caps callback.
 */
static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
	.get_tag_protocol = gswip_get_tag_protocol,
	.setup = gswip_setup,
	.port_enable = gswip_port_enable,
	.port_disable = gswip_port_disable,
	.port_bridge_join = gswip_port_bridge_join,
	.port_bridge_leave = gswip_port_bridge_leave,
	.port_fast_age = gswip_port_fast_age,
	.port_vlan_filtering = gswip_port_vlan_filtering,
	.port_vlan_add = gswip_port_vlan_add,
	.port_vlan_del = gswip_port_vlan_del,
	.port_stp_state_set = gswip_port_stp_state_set,
	.port_fdb_add = gswip_port_fdb_add,
	.port_fdb_del = gswip_port_fdb_del,
	.port_fdb_dump = gswip_port_fdb_dump,
	.port_change_mtu = gswip_port_change_mtu,
	.port_max_mtu = gswip_port_max_mtu,
	.phylink_get_caps = gswip_xrx200_phylink_get_caps,
	.phylink_mac_config = gswip_phylink_mac_config,
	.phylink_mac_link_down = gswip_phylink_mac_link_down,
	.phylink_mac_link_up = gswip_phylink_mac_link_up,
	.get_strings = gswip_get_strings,
	.get_ethtool_stats = gswip_get_ethtool_stats,
	.get_sset_count = gswip_get_sset_count,
};
/* DSA callbacks for the xrx300/xrx330 family (GSWIP 2.2). Only the
 * phylink_get_caps callback differs from the xrx200 table.
 */
static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
	.get_tag_protocol = gswip_get_tag_protocol,
	.setup = gswip_setup,
	.port_enable = gswip_port_enable,
	.port_disable = gswip_port_disable,
	.port_bridge_join = gswip_port_bridge_join,
	.port_bridge_leave = gswip_port_bridge_leave,
	.port_fast_age = gswip_port_fast_age,
	.port_vlan_filtering = gswip_port_vlan_filtering,
	.port_vlan_add = gswip_port_vlan_add,
	.port_vlan_del = gswip_port_vlan_del,
	.port_stp_state_set = gswip_port_stp_state_set,
	.port_fdb_add = gswip_port_fdb_add,
	.port_fdb_del = gswip_port_fdb_del,
	.port_fdb_dump = gswip_port_fdb_dump,
	.port_change_mtu = gswip_port_change_mtu,
	.port_max_mtu = gswip_port_max_mtu,
	.phylink_get_caps = gswip_xrx300_phylink_get_caps,
	.phylink_mac_config = gswip_phylink_mac_config,
	.phylink_mac_link_down = gswip_phylink_mac_link_down,
	.phylink_mac_link_up = gswip_phylink_mac_link_up,
	.get_strings = gswip_get_strings,
	.get_ethtool_stats = gswip_get_ethtool_stats,
	.get_sset_count = gswip_get_sset_count,
};
/* GPHY firmware file names per SoC/revision: "fe" names the Fast
 * Ethernet PHY22F firmware, "ge" the Gigabit PHY11G firmware.
 */
static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
};

static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
	.fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
	.ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
};

/* Shared by xrx300 and xrx330 (see xway_gphy_match below). */
static const struct xway_gphy_match_data xrx300_gphy_data = {
	.fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
	.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};
/* Compatibles for the GPHY firmware container node. The generic
 * "lantiq,xrx200-gphy-fw" entry has no data on purpose: the firmware
 * set is then chosen at runtime from the GSWIP version register (see
 * gswip_gphy_fw_list()).
 */
static const struct of_device_id xway_gphy_match[] = {
	{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
	{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
	{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
	{ .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
	{ .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
	{},
};
  1631. static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
  1632. {
  1633. struct device *dev = priv->dev;
  1634. const struct firmware *fw;
  1635. void *fw_addr;
  1636. dma_addr_t dma_addr;
  1637. dma_addr_t dev_addr;
  1638. size_t size;
  1639. int ret;
  1640. ret = clk_prepare_enable(gphy_fw->clk_gate);
  1641. if (ret)
  1642. return ret;
  1643. reset_control_assert(gphy_fw->reset);
  1644. /* The vendor BSP uses a 200ms delay after asserting the reset line.
  1645. * Without this some users are observing that the PHY is not coming up
  1646. * on the MDIO bus.
  1647. */
  1648. msleep(200);
  1649. ret = request_firmware(&fw, gphy_fw->fw_name, dev);
  1650. if (ret) {
  1651. dev_err(dev, "failed to load firmware: %s, error: %i\n",
  1652. gphy_fw->fw_name, ret);
  1653. return ret;
  1654. }
  1655. /* GPHY cores need the firmware code in a persistent and contiguous
  1656. * memory area with a 16 kB boundary aligned start address.
  1657. */
  1658. size = fw->size + XRX200_GPHY_FW_ALIGN;
  1659. fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
  1660. if (fw_addr) {
  1661. fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
  1662. dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
  1663. memcpy(fw_addr, fw->data, fw->size);
  1664. } else {
  1665. dev_err(dev, "failed to alloc firmware memory\n");
  1666. release_firmware(fw);
  1667. return -ENOMEM;
  1668. }
  1669. release_firmware(fw);
  1670. ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
  1671. if (ret)
  1672. return ret;
  1673. reset_control_deassert(gphy_fw->reset);
  1674. return ret;
  1675. }
  1676. static int gswip_gphy_fw_probe(struct gswip_priv *priv,
  1677. struct gswip_gphy_fw *gphy_fw,
  1678. struct device_node *gphy_fw_np, int i)
  1679. {
  1680. struct device *dev = priv->dev;
  1681. u32 gphy_mode;
  1682. int ret;
  1683. char gphyname[10];
  1684. snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
  1685. gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
  1686. if (IS_ERR(gphy_fw->clk_gate)) {
  1687. dev_err(dev, "Failed to lookup gate clock\n");
  1688. return PTR_ERR(gphy_fw->clk_gate);
  1689. }
  1690. ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
  1691. if (ret)
  1692. return ret;
  1693. ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
  1694. /* Default to GE mode */
  1695. if (ret)
  1696. gphy_mode = GPHY_MODE_GE;
  1697. switch (gphy_mode) {
  1698. case GPHY_MODE_FE:
  1699. gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
  1700. break;
  1701. case GPHY_MODE_GE:
  1702. gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
  1703. break;
  1704. default:
  1705. dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
  1706. return -EINVAL;
  1707. }
  1708. gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
  1709. if (IS_ERR(gphy_fw->reset))
  1710. return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
  1711. "Failed to lookup gphy reset\n");
  1712. return gswip_gphy_fw_load(priv, gphy_fw);
  1713. }
  1714. static void gswip_gphy_fw_remove(struct gswip_priv *priv,
  1715. struct gswip_gphy_fw *gphy_fw)
  1716. {
  1717. int ret;
  1718. /* check if the device was fully probed */
  1719. if (!gphy_fw->fw_name)
  1720. return;
  1721. ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
  1722. if (ret)
  1723. dev_err(priv->dev, "can not reset GPHY FW pointer");
  1724. clk_disable_unprepare(gphy_fw->clk_gate);
  1725. reset_control_put(gphy_fw->reset);
  1726. }
  1727. static int gswip_gphy_fw_list(struct gswip_priv *priv,
  1728. struct device_node *gphy_fw_list_np, u32 version)
  1729. {
  1730. struct device *dev = priv->dev;
  1731. struct device_node *gphy_fw_np;
  1732. const struct of_device_id *match;
  1733. int err;
  1734. int i = 0;
  1735. /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
  1736. * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
  1737. * needs a different GPHY firmware.
  1738. */
  1739. if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
  1740. switch (version) {
  1741. case GSWIP_VERSION_2_0:
  1742. priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
  1743. break;
  1744. case GSWIP_VERSION_2_1:
  1745. priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
  1746. break;
  1747. default:
  1748. dev_err(dev, "unknown GSWIP version: 0x%x", version);
  1749. return -ENOENT;
  1750. }
  1751. }
  1752. match = of_match_node(xway_gphy_match, gphy_fw_list_np);
  1753. if (match && match->data)
  1754. priv->gphy_fw_name_cfg = match->data;
  1755. if (!priv->gphy_fw_name_cfg) {
  1756. dev_err(dev, "GPHY compatible type not supported");
  1757. return -ENOENT;
  1758. }
  1759. priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
  1760. if (!priv->num_gphy_fw)
  1761. return -ENOENT;
  1762. priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
  1763. "lantiq,rcu");
  1764. if (IS_ERR(priv->rcu_regmap))
  1765. return PTR_ERR(priv->rcu_regmap);
  1766. priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
  1767. sizeof(*priv->gphy_fw),
  1768. GFP_KERNEL | __GFP_ZERO);
  1769. if (!priv->gphy_fw)
  1770. return -ENOMEM;
  1771. for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
  1772. err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
  1773. gphy_fw_np, i);
  1774. if (err) {
  1775. of_node_put(gphy_fw_np);
  1776. goto remove_gphy;
  1777. }
  1778. i++;
  1779. }
  1780. /* The standalone PHY11G requires 300ms to be fully
  1781. * initialized and ready for any MDIO communication after being
  1782. * taken out of reset. For the SoC-internal GPHY variant there
  1783. * is no (known) documentation for the minimum time after a
  1784. * reset. Use the same value as for the standalone variant as
  1785. * some users have reported internal PHYs not being detected
  1786. * without any delay.
  1787. */
  1788. msleep(300);
  1789. return 0;
  1790. remove_gphy:
  1791. for (i = 0; i < priv->num_gphy_fw; i++)
  1792. gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
  1793. return err;
  1794. }
/* Bind the GSWIP switch device: map the three register blocks (switch
 * core, MDIO, MII), validate the hardware version against the
 * devicetree compatible, optionally load GPHY firmware, bring up the
 * MDIO bus and register the DSA switch.
 *
 * Returns 0 on success or a negative errno; all acquired resources are
 * released on failure (devm-managed or via the goto chain below).
 */
static int gswip_probe(struct platform_device *pdev)
{
	struct gswip_priv *priv;
	struct device_node *np, *mdio_np, *gphy_fw_np;
	struct device *dev = &pdev->dev;
	int err;
	int i;
	u32 version;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Three MMIO resources: 0 = switch core, 1 = MDIO, 2 = MII */
	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->gswip))
		return PTR_ERR(priv->gswip);

	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->mdio))
		return PTR_ERR(priv->mdio);

	priv->mii = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(priv->mii))
		return PTR_ERR(priv->mii);

	priv->hw_info = of_device_get_match_data(dev);
	if (!priv->hw_info)
		return -EINVAL;

	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->dev = dev;
	priv->ds->num_ports = priv->hw_info->max_ports;
	priv->ds->priv = priv;
	priv->ds->ops = priv->hw_info->ops;
	priv->dev = dev;
	mutex_init(&priv->pce_table_lock);
	version = gswip_switch_r(priv, GSWIP_VERSION);

	np = dev->of_node;
	/* Reject compatibles that do not match the version the hardware
	 * itself reports.
	 */
	switch (version) {
	case GSWIP_VERSION_2_0:
	case GSWIP_VERSION_2_1:
		if (!of_device_is_compatible(np, "lantiq,xrx200-gswip"))
			return -EINVAL;
		break;
	case GSWIP_VERSION_2_2:
	case GSWIP_VERSION_2_2_ETC:
		if (!of_device_is_compatible(np, "lantiq,xrx300-gswip") &&
		    !of_device_is_compatible(np, "lantiq,xrx330-gswip"))
			return -EINVAL;
		break;
	default:
		dev_err(dev, "unknown GSWIP version: 0x%x", version);
		return -ENOENT;
	}

	/* load the GPHY firmware, if a firmware node is present */
	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
	if (gphy_fw_np) {
		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
		of_node_put(gphy_fw_np);
		if (err) {
			dev_err(dev, "gphy fw probe failed\n");
			return err;
		}
	}

	/* bring up the mdio bus */
	mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
	if (mdio_np) {
		err = gswip_mdio(priv, mdio_np);
		if (err) {
			dev_err(dev, "mdio probe failed\n");
			goto put_mdio_node;
		}
	}

	err = dsa_register_switch(priv->ds);
	if (err) {
		dev_err(dev, "dsa switch register failed: %i\n", err);
		goto mdio_bus;
	}
	/* The hardware only supports one specific CPU port */
	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
		dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
			priv->hw_info->cpu_port);
		err = -EINVAL;
		goto disable_switch;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
	return 0;

disable_switch:
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
	dsa_unregister_switch(priv->ds);
mdio_bus:
	if (mdio_np) {
		mdiobus_unregister(priv->ds->slave_mii_bus);
		mdiobus_free(priv->ds->slave_mii_bus);
	}
put_mdio_node:
	/* of_node_put(NULL) is a no-op, so this is safe even when no
	 * mdio child node exists.
	 */
	of_node_put(mdio_np);
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}
  1894. static int gswip_remove(struct platform_device *pdev)
  1895. {
  1896. struct gswip_priv *priv = platform_get_drvdata(pdev);
  1897. int i;
  1898. if (!priv)
  1899. return 0;
  1900. /* disable the switch */
  1901. gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
  1902. dsa_unregister_switch(priv->ds);
  1903. if (priv->ds->slave_mii_bus) {
  1904. mdiobus_unregister(priv->ds->slave_mii_bus);
  1905. of_node_put(priv->ds->slave_mii_bus->dev.of_node);
  1906. mdiobus_free(priv->ds->slave_mii_bus);
  1907. }
  1908. for (i = 0; i < priv->num_gphy_fw; i++)
  1909. gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
  1910. return 0;
  1911. }
  1912. static void gswip_shutdown(struct platform_device *pdev)
  1913. {
  1914. struct gswip_priv *priv = platform_get_drvdata(pdev);
  1915. if (!priv)
  1916. return;
  1917. dsa_switch_shutdown(priv->ds);
  1918. platform_set_drvdata(pdev, NULL);
  1919. }
/* Per-family hardware description: both families expose 7 ports with
 * port 6 as the CPU port; they differ only in the dsa_switch_ops table.
 */
static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.cpu_port = 6,
	.ops = &gswip_xrx200_switch_ops,
};

static const struct gswip_hw_info gswip_xrx300 = {
	.max_ports = 7,
	.cpu_port = 6,
	.ops = &gswip_xrx300_switch_ops,
};
/* Devicetree match table; xrx300 and xrx330 share one hw_info. */
static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{ .compatible = "lantiq,xrx300-gswip", .data = &gswip_xrx300 },
	{ .compatible = "lantiq,xrx330-gswip", .data = &gswip_xrx300 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);
/* Platform driver glue; devices are matched via gswip_of_match. */
static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.shutdown = gswip_shutdown,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);
/* Firmware files requested by gswip_gphy_fw_load(), declared here so
 * module tooling can include them (e.g. in an initramfs).
 */
MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <[email protected]>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");