netcp_ethss.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Keystone GBE and XGBE subsystem code
  4. *
  5. * Copyright (C) 2014 Texas Instruments Incorporated
  6. * Authors: Sandeep Nair <[email protected]>
  7. * Sandeep Paulraj <[email protected]>
  8. * Cyril Chemparathy <[email protected]>
  9. * Santosh Shilimkar <[email protected]>
  10. * Wingman Kwok <[email protected]>
  11. */
  12. #include <linux/io.h>
  13. #include <linux/module.h>
  14. #include <linux/of_mdio.h>
  15. #include <linux/of_net.h>
  16. #include <linux/of_address.h>
  17. #include <linux/if_vlan.h>
  18. #include <linux/ptp_classify.h>
  19. #include <linux/net_tstamp.h>
  20. #include <linux/ethtool.h>
  21. #include "cpsw.h"
  22. #include "cpsw_ale.h"
  23. #include "netcp.h"
  24. #include "cpts.h"
  25. #define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
  26. #define NETCP_DRIVER_VERSION "v1.0"
  27. #define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
  28. #define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
  29. #define GBE_MINOR_VERSION(reg) (reg & 0xff)
  30. #define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
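/* Illustrative note (not part of the upstream source): for a hypothetical
 * id_ver value of 0x4ed21902, the decode macros above yield
 *   GBE_IDENT()         -> 0x4ed2 (bits 31:16)
 *   GBE_RTL_VERSION()   -> 0x3    (bits 15:11)
 *   GBE_MAJOR_VERSION() -> 0x1    (bits 10:8)
 *   GBE_MINOR_VERSION() -> 0x02   (bits 7:0)
 */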
  31. /* 1G Ethernet SS defines */
  32. #define GBE_MODULE_NAME "netcp-gbe"
  33. #define GBE_SS_VERSION_14 0x4ed2
  34. #define GBE_SS_REG_INDEX 0
  35. #define GBE_SGMII34_REG_INDEX 1
  36. #define GBE_SM_REG_INDEX 2
  37. /* offset relative to base of GBE_SS_REG_INDEX */
  38. #define GBE13_SGMII_MODULE_OFFSET 0x100
  39. /* offset relative to base of GBE_SM_REG_INDEX */
  40. #define GBE13_HOST_PORT_OFFSET 0x34
  41. #define GBE13_SLAVE_PORT_OFFSET 0x60
  42. #define GBE13_EMAC_OFFSET 0x100
  43. #define GBE13_SLAVE_PORT2_OFFSET 0x200
  44. #define GBE13_HW_STATS_OFFSET 0x300
  45. #define GBE13_CPTS_OFFSET 0x500
  46. #define GBE13_ALE_OFFSET 0x600
  47. #define GBE13_HOST_PORT_NUM 0
  48. /* 1G Ethernet NU SS defines */
  49. #define GBENU_MODULE_NAME "netcp-gbenu"
  50. #define GBE_SS_ID_NU 0x4ee6
  51. #define GBE_SS_ID_2U 0x4ee8
  52. #define IS_SS_ID_MU(d) \
  53. ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
  54. (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
  55. #define IS_SS_ID_NU(d) \
  56. (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
  57. #define IS_SS_ID_VER_14(d) \
  58. (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
  59. #define IS_SS_ID_2U(d) \
  60. (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
  61. #define GBENU_SS_REG_INDEX 0
  62. #define GBENU_SM_REG_INDEX 1
  63. #define GBENU_SGMII_MODULE_OFFSET 0x100
  64. #define GBENU_HOST_PORT_OFFSET 0x1000
  65. #define GBENU_SLAVE_PORT_OFFSET 0x2000
  66. #define GBENU_EMAC_OFFSET 0x2330
  67. #define GBENU_HW_STATS_OFFSET 0x1a000
  68. #define GBENU_CPTS_OFFSET 0x1d000
  69. #define GBENU_ALE_OFFSET 0x1e000
  70. #define GBENU_HOST_PORT_NUM 0
  71. #define GBENU_SGMII_MODULE_SIZE 0x100
  72. /* 10G Ethernet SS defines */
  73. #define XGBE_MODULE_NAME "netcp-xgbe"
  74. #define XGBE_SS_VERSION_10 0x4ee4
  75. #define XGBE_SS_REG_INDEX 0
  76. #define XGBE_SM_REG_INDEX 1
  77. #define XGBE_SERDES_REG_INDEX 2
  78. /* offset relative to base of XGBE_SS_REG_INDEX */
  79. #define XGBE10_SGMII_MODULE_OFFSET 0x100
  80. #define IS_SS_ID_XGBE(d) ((d)->ss_version == XGBE_SS_VERSION_10)
  81. /* offset relative to base of XGBE_SM_REG_INDEX */
  82. #define XGBE10_HOST_PORT_OFFSET 0x34
  83. #define XGBE10_SLAVE_PORT_OFFSET 0x64
  84. #define XGBE10_EMAC_OFFSET 0x400
  85. #define XGBE10_CPTS_OFFSET 0x600
  86. #define XGBE10_ALE_OFFSET 0x700
  87. #define XGBE10_HW_STATS_OFFSET 0x800
  88. #define XGBE10_HOST_PORT_NUM 0
  89. #define GBE_TIMER_INTERVAL (HZ / 2)
  90. /* Soft reset register values */
  91. #define SOFT_RESET_MASK BIT(0)
  92. #define SOFT_RESET BIT(0)
  93. #define DEVICE_EMACSL_RESET_POLL_COUNT 100
  94. #define GMACSL_RET_WARN_RESET_INCOMPLETE -2
  95. #define MACSL_RX_ENABLE_CSF BIT(23)
  96. #define MACSL_ENABLE_EXT_CTL BIT(18)
  97. #define MACSL_XGMII_ENABLE BIT(13)
  98. #define MACSL_XGIG_MODE BIT(8)
  99. #define MACSL_GIG_MODE BIT(7)
  100. #define MACSL_GMII_ENABLE BIT(5)
  101. #define MACSL_FULLDUPLEX BIT(0)
  102. #define GBE_CTL_P0_ENABLE BIT(2)
  103. #define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13)
  104. #define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
  105. #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
  106. #define GBE_STATS_CD_SEL BIT(28)
  107. #define GBE_PORT_MASK(x) (BIT(x) - 1)
  108. #define GBE_MASK_NO_PORTS 0
  109. #define GBE_DEF_1G_MAC_CONTROL \
  110. (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
  111. MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
  112. #define GBE_DEF_10G_MAC_CONTROL \
  113. (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
  114. MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
  115. #define GBE_STATSA_MODULE 0
  116. #define GBE_STATSB_MODULE 1
  117. #define GBE_STATSC_MODULE 2
  118. #define GBE_STATSD_MODULE 3
  119. #define GBENU_STATS0_MODULE 0
  120. #define GBENU_STATS1_MODULE 1
  121. #define GBENU_STATS2_MODULE 2
  122. #define GBENU_STATS3_MODULE 3
  123. #define GBENU_STATS4_MODULE 4
  124. #define GBENU_STATS5_MODULE 5
  125. #define GBENU_STATS6_MODULE 6
  126. #define GBENU_STATS7_MODULE 7
  127. #define GBENU_STATS8_MODULE 8
  128. #define XGBE_STATS0_MODULE 0
  129. #define XGBE_STATS1_MODULE 1
  130. #define XGBE_STATS2_MODULE 2
  131. /* s: 0-based slave_port */
  132. #define SGMII_BASE(d, s) \
  133. (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
  134. #define GBE_TX_QUEUE 648
  135. #define GBE_TXHOOK_ORDER 0
  136. #define GBE_RXHOOK_ORDER 0
  137. #define GBE_DEFAULT_ALE_AGEOUT 30
  138. #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
  139. #define SLAVE_LINK_IS_RGMII(s) \
  140. (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
  141. ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
  142. #define SLAVE_LINK_IS_SGMII(s) \
  143. ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
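/* Illustrative note (not part of the upstream source): the three classifiers
 * above rely on the numeric ordering of the link_interface values: SGMII
 * modes sort lowest (<= SGMII_LINK_MAC_PHY_NO_MDIO), RGMII modes fall between
 * RGMII_LINK_MAC_PHY and RGMII_LINK_MAC_PHY_NO_MDIO, and XGMII modes sort
 * highest (>= XGMII_LINK_MAC_PHY).
 */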
  144. #define NETCP_LINK_STATE_INVALID -1
  145. #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
  146. offsetof(struct gbe##_##rb, rn)
  147. #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
  148. offsetof(struct gbenu##_##rb, rn)
  149. #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
  150. offsetof(struct xgbe##_##rb, rn)
  151. #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
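/* Illustrative note (not part of the upstream source): the *_SET_REG_OFS and
 * GBE_REG_ADDR helpers implement the per-variant register indirection. For
 * example, on a struct gbe_slave *slave,
 *   GBE_SET_REG_OFS(slave, emac_regs, soft_reset)
 * caches offsetof(struct gbe_emac_regs, soft_reset) in
 * slave->emac_regs_ofs.soft_reset, and
 *   GBE_REG_ADDR(slave, emac_regs, soft_reset)
 * later resolves to slave->emac_regs + slave->emac_regs_ofs.soft_reset, so
 * common code can address the GBE, GBENU and XGBE layouts the same way.
 */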
  152. #define HOST_TX_PRI_MAP_DEFAULT 0x00000000
  153. #if IS_ENABLED(CONFIG_TI_CPTS)
  154. /* Px_TS_CTL register fields */
  155. #define TS_RX_ANX_F_EN BIT(0)
  156. #define TS_RX_VLAN_LT1_EN BIT(1)
  157. #define TS_RX_VLAN_LT2_EN BIT(2)
  158. #define TS_RX_ANX_D_EN BIT(3)
  159. #define TS_TX_ANX_F_EN BIT(4)
  160. #define TS_TX_VLAN_LT1_EN BIT(5)
  161. #define TS_TX_VLAN_LT2_EN BIT(6)
  162. #define TS_TX_ANX_D_EN BIT(7)
  163. #define TS_LT2_EN BIT(8)
  164. #define TS_RX_ANX_E_EN BIT(9)
  165. #define TS_TX_ANX_E_EN BIT(10)
  166. #define TS_MSG_TYPE_EN_SHIFT 16
  167. #define TS_MSG_TYPE_EN_MASK 0xffff
  168. /* Px_TS_SEQ_LTYPE register fields */
  169. #define TS_SEQ_ID_OFS_SHIFT 16
  170. #define TS_SEQ_ID_OFS_MASK 0x3f
  171. /* Px_TS_CTL_LTYPE2 register fields */
  172. #define TS_107 BIT(16)
  173. #define TS_129 BIT(17)
  174. #define TS_130 BIT(18)
  175. #define TS_131 BIT(19)
  176. #define TS_132 BIT(20)
  177. #define TS_319 BIT(21)
  178. #define TS_320 BIT(22)
  179. #define TS_TTL_NONZERO BIT(23)
  180. #define TS_UNI_EN BIT(24)
  181. #define TS_UNI_EN_SHIFT 24
  182. #define TS_TX_ANX_ALL_EN \
  183. (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
  184. #define TS_RX_ANX_ALL_EN \
  185. (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
  186. #define TS_CTL_DST_PORT TS_319
  187. #define TS_CTL_DST_PORT_SHIFT 21
  188. #define TS_CTL_MADDR_ALL \
  189. (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
  190. #define TS_CTL_MADDR_SHIFT 16
  191. /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
  192. #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
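/* Illustrative note (not part of the upstream source): bits 0-3 correspond to
 * PTP messageType values 0 (Sync), 1 (Delay_Req), 2 (Pdelay_Req) and
 * 3 (Pdelay_Resp), i.e. the event messages that carry hardware timestamps.
 */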
  193. #endif /* CONFIG_TI_CPTS */
  194. struct xgbe_ss_regs {
  195. u32 id_ver;
  196. u32 synce_count;
  197. u32 synce_mux;
  198. u32 control;
  199. };
  200. struct xgbe_switch_regs {
  201. u32 id_ver;
  202. u32 control;
  203. u32 emcontrol;
  204. u32 stat_port_en;
  205. u32 ptype;
  206. u32 soft_idle;
  207. u32 thru_rate;
  208. u32 gap_thresh;
  209. u32 tx_start_wds;
  210. u32 flow_control;
  211. u32 cppi_thresh;
  212. };
  213. struct xgbe_port_regs {
  214. u32 blk_cnt;
  215. u32 port_vlan;
  216. u32 tx_pri_map;
  217. u32 sa_lo;
  218. u32 sa_hi;
  219. u32 ts_ctl;
  220. u32 ts_seq_ltype;
  221. u32 ts_vlan;
  222. u32 ts_ctl_ltype2;
  223. u32 ts_ctl2;
  224. u32 control;
  225. };
  226. struct xgbe_host_port_regs {
  227. u32 blk_cnt;
  228. u32 port_vlan;
  229. u32 tx_pri_map;
  230. u32 src_id;
  231. u32 rx_pri_map;
  232. u32 rx_maxlen;
  233. };
  234. struct xgbe_emac_regs {
  235. u32 id_ver;
  236. u32 mac_control;
  237. u32 mac_status;
  238. u32 soft_reset;
  239. u32 rx_maxlen;
  240. u32 __reserved_0;
  241. u32 rx_pause;
  242. u32 tx_pause;
  243. u32 em_control;
  244. u32 __reserved_1;
  245. u32 tx_gap;
  246. u32 rsvd[4];
  247. };
  248. struct xgbe_host_hw_stats {
  249. u32 rx_good_frames;
  250. u32 rx_broadcast_frames;
  251. u32 rx_multicast_frames;
  252. u32 __rsvd_0[3];
  253. u32 rx_oversized_frames;
  254. u32 __rsvd_1;
  255. u32 rx_undersized_frames;
  256. u32 __rsvd_2;
  257. u32 overrun_type4;
  258. u32 overrun_type5;
  259. u32 rx_bytes;
  260. u32 tx_good_frames;
  261. u32 tx_broadcast_frames;
  262. u32 tx_multicast_frames;
  263. u32 __rsvd_3[9];
  264. u32 tx_bytes;
  265. u32 tx_64byte_frames;
  266. u32 tx_65_to_127byte_frames;
  267. u32 tx_128_to_255byte_frames;
  268. u32 tx_256_to_511byte_frames;
  269. u32 tx_512_to_1023byte_frames;
  270. u32 tx_1024byte_frames;
  271. u32 net_bytes;
  272. u32 rx_sof_overruns;
  273. u32 rx_mof_overruns;
  274. u32 rx_dma_overruns;
  275. };
  276. struct xgbe_hw_stats {
  277. u32 rx_good_frames;
  278. u32 rx_broadcast_frames;
  279. u32 rx_multicast_frames;
  280. u32 rx_pause_frames;
  281. u32 rx_crc_errors;
  282. u32 rx_align_code_errors;
  283. u32 rx_oversized_frames;
  284. u32 rx_jabber_frames;
  285. u32 rx_undersized_frames;
  286. u32 rx_fragments;
  287. u32 overrun_type4;
  288. u32 overrun_type5;
  289. u32 rx_bytes;
  290. u32 tx_good_frames;
  291. u32 tx_broadcast_frames;
  292. u32 tx_multicast_frames;
  293. u32 tx_pause_frames;
  294. u32 tx_deferred_frames;
  295. u32 tx_collision_frames;
  296. u32 tx_single_coll_frames;
  297. u32 tx_mult_coll_frames;
  298. u32 tx_excessive_collisions;
  299. u32 tx_late_collisions;
  300. u32 tx_underrun;
  301. u32 tx_carrier_sense_errors;
  302. u32 tx_bytes;
  303. u32 tx_64byte_frames;
  304. u32 tx_65_to_127byte_frames;
  305. u32 tx_128_to_255byte_frames;
  306. u32 tx_256_to_511byte_frames;
  307. u32 tx_512_to_1023byte_frames;
  308. u32 tx_1024byte_frames;
  309. u32 net_bytes;
  310. u32 rx_sof_overruns;
  311. u32 rx_mof_overruns;
  312. u32 rx_dma_overruns;
  313. };
  314. struct gbenu_ss_regs {
  315. u32 id_ver;
  316. u32 synce_count; /* NU */
  317. u32 synce_mux; /* NU */
  318. u32 control; /* 2U */
  319. u32 __rsvd_0[2]; /* 2U */
  320. u32 rgmii_status; /* 2U */
  321. u32 ss_status; /* 2U */
  322. };
  323. struct gbenu_switch_regs {
  324. u32 id_ver;
  325. u32 control;
  326. u32 __rsvd_0[2];
  327. u32 emcontrol;
  328. u32 stat_port_en;
  329. u32 ptype; /* NU */
  330. u32 soft_idle;
  331. u32 thru_rate; /* NU */
  332. u32 gap_thresh; /* NU */
  333. u32 tx_start_wds; /* NU */
  334. u32 eee_prescale; /* 2U */
  335. u32 tx_g_oflow_thresh_set; /* NU */
  336. u32 tx_g_oflow_thresh_clr; /* NU */
  337. u32 tx_g_buf_thresh_set_l; /* NU */
  338. u32 tx_g_buf_thresh_set_h; /* NU */
  339. u32 tx_g_buf_thresh_clr_l; /* NU */
  340. u32 tx_g_buf_thresh_clr_h; /* NU */
  341. };
  342. struct gbenu_port_regs {
  343. u32 __rsvd_0;
  344. u32 control;
  345. u32 max_blks; /* 2U */
  346. u32 mem_align1;
  347. u32 blk_cnt;
  348. u32 port_vlan;
  349. u32 tx_pri_map; /* NU */
  350. u32 pri_ctl; /* 2U */
  351. u32 rx_pri_map;
  352. u32 rx_maxlen;
  353. u32 tx_blks_pri; /* NU */
  354. u32 __rsvd_1;
  355. u32 idle2lpi; /* 2U */
  356. u32 lpi2idle; /* 2U */
  357. u32 eee_status; /* 2U */
  358. u32 __rsvd_2;
  359. u32 __rsvd_3[176]; /* NU: more to add */
  360. u32 __rsvd_4[2];
  361. u32 sa_lo;
  362. u32 sa_hi;
  363. u32 ts_ctl;
  364. u32 ts_seq_ltype;
  365. u32 ts_vlan;
  366. u32 ts_ctl_ltype2;
  367. u32 ts_ctl2;
  368. };
  369. struct gbenu_host_port_regs {
  370. u32 __rsvd_0;
  371. u32 control;
  372. u32 flow_id_offset; /* 2U */
  373. u32 __rsvd_1;
  374. u32 blk_cnt;
  375. u32 port_vlan;
  376. u32 tx_pri_map; /* NU */
  377. u32 pri_ctl;
  378. u32 rx_pri_map;
  379. u32 rx_maxlen;
  380. u32 tx_blks_pri; /* NU */
  381. u32 __rsvd_2;
  382. u32 idle2lpi; /* 2U */
  383. u32 lpi2wake; /* 2U */
  384. u32 eee_status; /* 2U */
  385. u32 __rsvd_3;
  386. u32 __rsvd_4[184]; /* NU */
  387. u32 host_blks_pri; /* NU */
  388. };
  389. struct gbenu_emac_regs {
  390. u32 mac_control;
  391. u32 mac_status;
  392. u32 soft_reset;
  393. u32 boff_test;
  394. u32 rx_pause;
  395. u32 __rsvd_0[11]; /* NU */
  396. u32 tx_pause;
  397. u32 __rsvd_1[11]; /* NU */
  398. u32 em_control;
  399. u32 tx_gap;
  400. };
  401. /* Some hw stat regs are applicable to slave port only.
  402. * This is handled by gbenu_et_stats struct. Also some
  403. * are for SS version NU and some are for 2U.
  404. */
  405. struct gbenu_hw_stats {
  406. u32 rx_good_frames;
  407. u32 rx_broadcast_frames;
  408. u32 rx_multicast_frames;
  409. u32 rx_pause_frames; /* slave */
  410. u32 rx_crc_errors;
  411. u32 rx_align_code_errors; /* slave */
  412. u32 rx_oversized_frames;
  413. u32 rx_jabber_frames; /* slave */
  414. u32 rx_undersized_frames;
  415. u32 rx_fragments; /* slave */
  416. u32 ale_drop;
  417. u32 ale_overrun_drop;
  418. u32 rx_bytes;
  419. u32 tx_good_frames;
  420. u32 tx_broadcast_frames;
  421. u32 tx_multicast_frames;
  422. u32 tx_pause_frames; /* slave */
  423. u32 tx_deferred_frames; /* slave */
  424. u32 tx_collision_frames; /* slave */
  425. u32 tx_single_coll_frames; /* slave */
  426. u32 tx_mult_coll_frames; /* slave */
  427. u32 tx_excessive_collisions; /* slave */
  428. u32 tx_late_collisions; /* slave */
  429. u32 rx_ipg_error; /* slave 10G only */
  430. u32 tx_carrier_sense_errors; /* slave */
  431. u32 tx_bytes;
  432. u32 tx_64B_frames;
  433. u32 tx_65_to_127B_frames;
  434. u32 tx_128_to_255B_frames;
  435. u32 tx_256_to_511B_frames;
  436. u32 tx_512_to_1023B_frames;
  437. u32 tx_1024B_frames;
  438. u32 net_bytes;
  439. u32 rx_bottom_fifo_drop;
  440. u32 rx_port_mask_drop;
  441. u32 rx_top_fifo_drop;
  442. u32 ale_rate_limit_drop;
  443. u32 ale_vid_ingress_drop;
  444. u32 ale_da_eq_sa_drop;
  445. u32 __rsvd_0[3];
  446. u32 ale_unknown_ucast;
  447. u32 ale_unknown_ucast_bytes;
  448. u32 ale_unknown_mcast;
  449. u32 ale_unknown_mcast_bytes;
  450. u32 ale_unknown_bcast;
  451. u32 ale_unknown_bcast_bytes;
  452. u32 ale_pol_match;
  453. u32 ale_pol_match_red; /* NU */
  454. u32 ale_pol_match_yellow; /* NU */
  455. u32 __rsvd_1[44];
  456. u32 tx_mem_protect_err;
  457. /* following NU only */
  458. u32 tx_pri0;
  459. u32 tx_pri1;
  460. u32 tx_pri2;
  461. u32 tx_pri3;
  462. u32 tx_pri4;
  463. u32 tx_pri5;
  464. u32 tx_pri6;
  465. u32 tx_pri7;
  466. u32 tx_pri0_bcnt;
  467. u32 tx_pri1_bcnt;
  468. u32 tx_pri2_bcnt;
  469. u32 tx_pri3_bcnt;
  470. u32 tx_pri4_bcnt;
  471. u32 tx_pri5_bcnt;
  472. u32 tx_pri6_bcnt;
  473. u32 tx_pri7_bcnt;
  474. u32 tx_pri0_drop;
  475. u32 tx_pri1_drop;
  476. u32 tx_pri2_drop;
  477. u32 tx_pri3_drop;
  478. u32 tx_pri4_drop;
  479. u32 tx_pri5_drop;
  480. u32 tx_pri6_drop;
  481. u32 tx_pri7_drop;
  482. u32 tx_pri0_drop_bcnt;
  483. u32 tx_pri1_drop_bcnt;
  484. u32 tx_pri2_drop_bcnt;
  485. u32 tx_pri3_drop_bcnt;
  486. u32 tx_pri4_drop_bcnt;
  487. u32 tx_pri5_drop_bcnt;
  488. u32 tx_pri6_drop_bcnt;
  489. u32 tx_pri7_drop_bcnt;
  490. };
  491. #define GBENU_HW_STATS_REG_MAP_SZ 0x200
  492. struct gbe_ss_regs {
  493. u32 id_ver;
  494. u32 synce_count;
  495. u32 synce_mux;
  496. };
  497. struct gbe_ss_regs_ofs {
  498. u16 id_ver;
  499. u16 control;
  500. u16 rgmii_status; /* 2U */
  501. };
  502. struct gbe_switch_regs {
  503. u32 id_ver;
  504. u32 control;
  505. u32 soft_reset;
  506. u32 stat_port_en;
  507. u32 ptype;
  508. u32 soft_idle;
  509. u32 thru_rate;
  510. u32 gap_thresh;
  511. u32 tx_start_wds;
  512. u32 flow_control;
  513. };
  514. struct gbe_switch_regs_ofs {
  515. u16 id_ver;
  516. u16 control;
  517. u16 soft_reset;
  518. u16 emcontrol;
  519. u16 stat_port_en;
  520. u16 ptype;
  521. u16 flow_control;
  522. };
  523. struct gbe_port_regs {
  524. u32 max_blks;
  525. u32 blk_cnt;
  526. u32 port_vlan;
  527. u32 tx_pri_map;
  528. u32 sa_lo;
  529. u32 sa_hi;
  530. u32 ts_ctl;
  531. u32 ts_seq_ltype;
  532. u32 ts_vlan;
  533. u32 ts_ctl_ltype2;
  534. u32 ts_ctl2;
  535. };
  536. struct gbe_port_regs_ofs {
  537. u16 port_vlan;
  538. u16 tx_pri_map;
  539. u16 rx_pri_map;
  540. u16 sa_lo;
  541. u16 sa_hi;
  542. u16 ts_ctl;
  543. u16 ts_seq_ltype;
  544. u16 ts_vlan;
  545. u16 ts_ctl_ltype2;
  546. u16 ts_ctl2;
  547. u16 rx_maxlen; /* 2U, NU */
  548. };
  549. struct gbe_host_port_regs {
  550. u32 src_id;
  551. u32 port_vlan;
  552. u32 rx_pri_map;
  553. u32 rx_maxlen;
  554. };
  555. struct gbe_host_port_regs_ofs {
  556. u16 port_vlan;
  557. u16 tx_pri_map;
  558. u16 rx_maxlen;
  559. };
  560. struct gbe_emac_regs {
  561. u32 id_ver;
  562. u32 mac_control;
  563. u32 mac_status;
  564. u32 soft_reset;
  565. u32 rx_maxlen;
  566. u32 __reserved_0;
  567. u32 rx_pause;
  568. u32 tx_pause;
  569. u32 __reserved_1;
  570. u32 rx_pri_map;
  571. u32 rsvd[6];
  572. };
  573. struct gbe_emac_regs_ofs {
  574. u16 mac_control;
  575. u16 soft_reset;
  576. u16 rx_maxlen;
  577. };
  578. struct gbe_hw_stats {
  579. u32 rx_good_frames;
  580. u32 rx_broadcast_frames;
  581. u32 rx_multicast_frames;
  582. u32 rx_pause_frames;
  583. u32 rx_crc_errors;
  584. u32 rx_align_code_errors;
  585. u32 rx_oversized_frames;
  586. u32 rx_jabber_frames;
  587. u32 rx_undersized_frames;
  588. u32 rx_fragments;
  589. u32 __pad_0[2];
  590. u32 rx_bytes;
  591. u32 tx_good_frames;
  592. u32 tx_broadcast_frames;
  593. u32 tx_multicast_frames;
  594. u32 tx_pause_frames;
  595. u32 tx_deferred_frames;
  596. u32 tx_collision_frames;
  597. u32 tx_single_coll_frames;
  598. u32 tx_mult_coll_frames;
  599. u32 tx_excessive_collisions;
  600. u32 tx_late_collisions;
  601. u32 tx_underrun;
  602. u32 tx_carrier_sense_errors;
  603. u32 tx_bytes;
  604. u32 tx_64byte_frames;
  605. u32 tx_65_to_127byte_frames;
  606. u32 tx_128_to_255byte_frames;
  607. u32 tx_256_to_511byte_frames;
  608. u32 tx_512_to_1023byte_frames;
  609. u32 tx_1024byte_frames;
  610. u32 net_bytes;
  611. u32 rx_sof_overruns;
  612. u32 rx_mof_overruns;
  613. u32 rx_dma_overruns;
  614. };
  615. #define GBE_MAX_HW_STAT_MODS 9
  616. #define GBE_HW_STATS_REG_MAP_SZ 0x100
  617. struct ts_ctl {
  618. int uni;
  619. u8 dst_port_map;
  620. u8 maddr_map;
  621. u8 ts_mcast_type;
  622. };
  623. struct gbe_slave {
  624. void __iomem *port_regs;
  625. void __iomem *emac_regs;
  626. struct gbe_port_regs_ofs port_regs_ofs;
  627. struct gbe_emac_regs_ofs emac_regs_ofs;
  628. int slave_num; /* 0 based logical number */
  629. int port_num; /* actual port number */
  630. atomic_t link_state;
  631. bool open;
  632. struct phy_device *phy;
  633. u32 link_interface;
  634. u32 mac_control;
  635. u8 phy_port_t;
  636. struct device_node *node;
  637. struct device_node *phy_node;
  638. struct ts_ctl ts_ctl;
  639. struct list_head slave_list;
  640. };
  641. struct gbe_priv {
  642. struct device *dev;
  643. struct netcp_device *netcp_device;
  644. struct timer_list timer;
  645. u32 num_slaves;
  646. u32 ale_ports;
  647. bool enable_ale;
  648. u8 max_num_slaves;
  649. u8 max_num_ports; /* max_num_slaves + 1 */
  650. u8 num_stats_mods;
  651. struct netcp_tx_pipe tx_pipe;
  652. int host_port;
  653. u32 rx_packet_max;
  654. u32 ss_version;
  655. u32 stats_en_mask;
  656. void __iomem *ss_regs;
  657. void __iomem *switch_regs;
  658. void __iomem *host_port_regs;
  659. void __iomem *ale_reg;
  660. void __iomem *cpts_reg;
  661. void __iomem *sgmii_port_regs;
  662. void __iomem *sgmii_port34_regs;
  663. void __iomem *xgbe_serdes_regs;
  664. void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
  665. struct gbe_ss_regs_ofs ss_regs_ofs;
  666. struct gbe_switch_regs_ofs switch_regs_ofs;
  667. struct gbe_host_port_regs_ofs host_port_regs_ofs;
  668. struct cpsw_ale *ale;
  669. unsigned int tx_queue_id;
  670. const char *dma_chan_name;
  671. struct list_head gbe_intf_head;
  672. struct list_head secondary_slaves;
  673. struct net_device *dummy_ndev;
  674. u64 *hw_stats;
  675. u32 *hw_stats_prev;
  676. const struct netcp_ethtool_stat *et_stats;
  677. int num_et_stats;
  678. /* Lock for updating the hwstats */
  679. spinlock_t hw_stats_lock;
  680. int cpts_registered;
  681. struct cpts *cpts;
  682. int rx_ts_enabled;
  683. int tx_ts_enabled;
  684. };
  685. struct gbe_intf {
  686. struct net_device *ndev;
  687. struct device *dev;
  688. struct gbe_priv *gbe_dev;
  689. struct netcp_tx_pipe tx_pipe;
  690. struct gbe_slave *slave;
  691. struct list_head gbe_intf_list;
  692. unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
  693. };
  694. static struct netcp_module gbe_module;
  695. static struct netcp_module xgbe_module;
  696. /* Statistic management */
  697. struct netcp_ethtool_stat {
  698. char desc[ETH_GSTRING_LEN];
  699. int type;
  700. u32 size;
  701. int offset;
  702. };
  703. #define GBE_STATSA_INFO(field) \
  704. { \
  705. "GBE_A:"#field, GBE_STATSA_MODULE, \
  706. sizeof_field(struct gbe_hw_stats, field), \
  707. offsetof(struct gbe_hw_stats, field) \
  708. }
  709. #define GBE_STATSB_INFO(field) \
  710. { \
  711. "GBE_B:"#field, GBE_STATSB_MODULE, \
  712. sizeof_field(struct gbe_hw_stats, field), \
  713. offsetof(struct gbe_hw_stats, field) \
  714. }
  715. #define GBE_STATSC_INFO(field) \
  716. { \
  717. "GBE_C:"#field, GBE_STATSC_MODULE, \
  718. sizeof_field(struct gbe_hw_stats, field), \
  719. offsetof(struct gbe_hw_stats, field) \
  720. }
  721. #define GBE_STATSD_INFO(field) \
  722. { \
  723. "GBE_D:"#field, GBE_STATSD_MODULE, \
  724. sizeof_field(struct gbe_hw_stats, field), \
  725. offsetof(struct gbe_hw_stats, field) \
  726. }
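/* Illustrative note (not part of the upstream source): each entry macro above
 * expands to a struct netcp_ethtool_stat initializer; for example
 * GBE_STATSA_INFO(rx_good_frames) becomes
 *   { "GBE_A:rx_good_frames", GBE_STATSA_MODULE,
 *     sizeof_field(struct gbe_hw_stats, rx_good_frames),
 *     offsetof(struct gbe_hw_stats, rx_good_frames) }
 * i.e. an ethtool string plus the stats module, field size and field offset.
 */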
  727. static const struct netcp_ethtool_stat gbe13_et_stats[] = {
  728. /* GBE module A */
  729. GBE_STATSA_INFO(rx_good_frames),
  730. GBE_STATSA_INFO(rx_broadcast_frames),
  731. GBE_STATSA_INFO(rx_multicast_frames),
  732. GBE_STATSA_INFO(rx_pause_frames),
  733. GBE_STATSA_INFO(rx_crc_errors),
  734. GBE_STATSA_INFO(rx_align_code_errors),
  735. GBE_STATSA_INFO(rx_oversized_frames),
  736. GBE_STATSA_INFO(rx_jabber_frames),
  737. GBE_STATSA_INFO(rx_undersized_frames),
  738. GBE_STATSA_INFO(rx_fragments),
  739. GBE_STATSA_INFO(rx_bytes),
  740. GBE_STATSA_INFO(tx_good_frames),
  741. GBE_STATSA_INFO(tx_broadcast_frames),
  742. GBE_STATSA_INFO(tx_multicast_frames),
  743. GBE_STATSA_INFO(tx_pause_frames),
  744. GBE_STATSA_INFO(tx_deferred_frames),
  745. GBE_STATSA_INFO(tx_collision_frames),
  746. GBE_STATSA_INFO(tx_single_coll_frames),
  747. GBE_STATSA_INFO(tx_mult_coll_frames),
  748. GBE_STATSA_INFO(tx_excessive_collisions),
  749. GBE_STATSA_INFO(tx_late_collisions),
  750. GBE_STATSA_INFO(tx_underrun),
  751. GBE_STATSA_INFO(tx_carrier_sense_errors),
  752. GBE_STATSA_INFO(tx_bytes),
  753. GBE_STATSA_INFO(tx_64byte_frames),
  754. GBE_STATSA_INFO(tx_65_to_127byte_frames),
  755. GBE_STATSA_INFO(tx_128_to_255byte_frames),
  756. GBE_STATSA_INFO(tx_256_to_511byte_frames),
  757. GBE_STATSA_INFO(tx_512_to_1023byte_frames),
  758. GBE_STATSA_INFO(tx_1024byte_frames),
  759. GBE_STATSA_INFO(net_bytes),
  760. GBE_STATSA_INFO(rx_sof_overruns),
  761. GBE_STATSA_INFO(rx_mof_overruns),
  762. GBE_STATSA_INFO(rx_dma_overruns),
  763. /* GBE module B */
  764. GBE_STATSB_INFO(rx_good_frames),
  765. GBE_STATSB_INFO(rx_broadcast_frames),
  766. GBE_STATSB_INFO(rx_multicast_frames),
  767. GBE_STATSB_INFO(rx_pause_frames),
  768. GBE_STATSB_INFO(rx_crc_errors),
  769. GBE_STATSB_INFO(rx_align_code_errors),
  770. GBE_STATSB_INFO(rx_oversized_frames),
  771. GBE_STATSB_INFO(rx_jabber_frames),
  772. GBE_STATSB_INFO(rx_undersized_frames),
  773. GBE_STATSB_INFO(rx_fragments),
  774. GBE_STATSB_INFO(rx_bytes),
  775. GBE_STATSB_INFO(tx_good_frames),
  776. GBE_STATSB_INFO(tx_broadcast_frames),
  777. GBE_STATSB_INFO(tx_multicast_frames),
  778. GBE_STATSB_INFO(tx_pause_frames),
  779. GBE_STATSB_INFO(tx_deferred_frames),
  780. GBE_STATSB_INFO(tx_collision_frames),
  781. GBE_STATSB_INFO(tx_single_coll_frames),
  782. GBE_STATSB_INFO(tx_mult_coll_frames),
  783. GBE_STATSB_INFO(tx_excessive_collisions),
  784. GBE_STATSB_INFO(tx_late_collisions),
  785. GBE_STATSB_INFO(tx_underrun),
  786. GBE_STATSB_INFO(tx_carrier_sense_errors),
  787. GBE_STATSB_INFO(tx_bytes),
  788. GBE_STATSB_INFO(tx_64byte_frames),
  789. GBE_STATSB_INFO(tx_65_to_127byte_frames),
  790. GBE_STATSB_INFO(tx_128_to_255byte_frames),
  791. GBE_STATSB_INFO(tx_256_to_511byte_frames),
  792. GBE_STATSB_INFO(tx_512_to_1023byte_frames),
  793. GBE_STATSB_INFO(tx_1024byte_frames),
  794. GBE_STATSB_INFO(net_bytes),
  795. GBE_STATSB_INFO(rx_sof_overruns),
  796. GBE_STATSB_INFO(rx_mof_overruns),
  797. GBE_STATSB_INFO(rx_dma_overruns),
  798. /* GBE module C */
  799. GBE_STATSC_INFO(rx_good_frames),
  800. GBE_STATSC_INFO(rx_broadcast_frames),
  801. GBE_STATSC_INFO(rx_multicast_frames),
  802. GBE_STATSC_INFO(rx_pause_frames),
  803. GBE_STATSC_INFO(rx_crc_errors),
  804. GBE_STATSC_INFO(rx_align_code_errors),
  805. GBE_STATSC_INFO(rx_oversized_frames),
  806. GBE_STATSC_INFO(rx_jabber_frames),
  807. GBE_STATSC_INFO(rx_undersized_frames),
  808. GBE_STATSC_INFO(rx_fragments),
  809. GBE_STATSC_INFO(rx_bytes),
  810. GBE_STATSC_INFO(tx_good_frames),
  811. GBE_STATSC_INFO(tx_broadcast_frames),
  812. GBE_STATSC_INFO(tx_multicast_frames),
  813. GBE_STATSC_INFO(tx_pause_frames),
  814. GBE_STATSC_INFO(tx_deferred_frames),
  815. GBE_STATSC_INFO(tx_collision_frames),
  816. GBE_STATSC_INFO(tx_single_coll_frames),
  817. GBE_STATSC_INFO(tx_mult_coll_frames),
  818. GBE_STATSC_INFO(tx_excessive_collisions),
  819. GBE_STATSC_INFO(tx_late_collisions),
  820. GBE_STATSC_INFO(tx_underrun),
  821. GBE_STATSC_INFO(tx_carrier_sense_errors),
  822. GBE_STATSC_INFO(tx_bytes),
  823. GBE_STATSC_INFO(tx_64byte_frames),
  824. GBE_STATSC_INFO(tx_65_to_127byte_frames),
  825. GBE_STATSC_INFO(tx_128_to_255byte_frames),
  826. GBE_STATSC_INFO(tx_256_to_511byte_frames),
  827. GBE_STATSC_INFO(tx_512_to_1023byte_frames),
  828. GBE_STATSC_INFO(tx_1024byte_frames),
  829. GBE_STATSC_INFO(net_bytes),
  830. GBE_STATSC_INFO(rx_sof_overruns),
  831. GBE_STATSC_INFO(rx_mof_overruns),
  832. GBE_STATSC_INFO(rx_dma_overruns),
  833. /* GBE module D */
  834. GBE_STATSD_INFO(rx_good_frames),
  835. GBE_STATSD_INFO(rx_broadcast_frames),
  836. GBE_STATSD_INFO(rx_multicast_frames),
  837. GBE_STATSD_INFO(rx_pause_frames),
  838. GBE_STATSD_INFO(rx_crc_errors),
  839. GBE_STATSD_INFO(rx_align_code_errors),
  840. GBE_STATSD_INFO(rx_oversized_frames),
  841. GBE_STATSD_INFO(rx_jabber_frames),
  842. GBE_STATSD_INFO(rx_undersized_frames),
  843. GBE_STATSD_INFO(rx_fragments),
  844. GBE_STATSD_INFO(rx_bytes),
  845. GBE_STATSD_INFO(tx_good_frames),
  846. GBE_STATSD_INFO(tx_broadcast_frames),
  847. GBE_STATSD_INFO(tx_multicast_frames),
  848. GBE_STATSD_INFO(tx_pause_frames),
  849. GBE_STATSD_INFO(tx_deferred_frames),
  850. GBE_STATSD_INFO(tx_collision_frames),
  851. GBE_STATSD_INFO(tx_single_coll_frames),
  852. GBE_STATSD_INFO(tx_mult_coll_frames),
  853. GBE_STATSD_INFO(tx_excessive_collisions),
  854. GBE_STATSD_INFO(tx_late_collisions),
  855. GBE_STATSD_INFO(tx_underrun),
  856. GBE_STATSD_INFO(tx_carrier_sense_errors),
  857. GBE_STATSD_INFO(tx_bytes),
  858. GBE_STATSD_INFO(tx_64byte_frames),
  859. GBE_STATSD_INFO(tx_65_to_127byte_frames),
  860. GBE_STATSD_INFO(tx_128_to_255byte_frames),
  861. GBE_STATSD_INFO(tx_256_to_511byte_frames),
  862. GBE_STATSD_INFO(tx_512_to_1023byte_frames),
  863. GBE_STATSD_INFO(tx_1024byte_frames),
  864. GBE_STATSD_INFO(net_bytes),
  865. GBE_STATSD_INFO(rx_sof_overruns),
  866. GBE_STATSD_INFO(rx_mof_overruns),
  867. GBE_STATSD_INFO(rx_dma_overruns),
  868. };
  869. /* Number of GBENU_STATS_HOST entries in the gbenu_et_stats table */
  870. #define GBENU_ET_STATS_HOST_SIZE 52
  871. #define GBENU_STATS_HOST(field) \
  872. { \
  873. "GBE_HOST:"#field, GBENU_STATS0_MODULE, \
  874. sizeof_field(struct gbenu_hw_stats, field), \
  875. offsetof(struct gbenu_hw_stats, field) \
  876. }
  877. /* Number of per-port (GBENU_STATS_Px) entries in the gbenu_et_stats table */
  878. #define GBENU_ET_STATS_PORT_SIZE 65
  879. #define GBENU_STATS_P1(field) \
  880. { \
  881. "GBE_P1:"#field, GBENU_STATS1_MODULE, \
  882. sizeof_field(struct gbenu_hw_stats, field), \
  883. offsetof(struct gbenu_hw_stats, field) \
  884. }
  885. #define GBENU_STATS_P2(field) \
  886. { \
  887. "GBE_P2:"#field, GBENU_STATS2_MODULE, \
  888. sizeof_field(struct gbenu_hw_stats, field), \
  889. offsetof(struct gbenu_hw_stats, field) \
  890. }
  891. #define GBENU_STATS_P3(field) \
  892. { \
  893. "GBE_P3:"#field, GBENU_STATS3_MODULE, \
  894. sizeof_field(struct gbenu_hw_stats, field), \
  895. offsetof(struct gbenu_hw_stats, field) \
  896. }
  897. #define GBENU_STATS_P4(field) \
  898. { \
  899. "GBE_P4:"#field, GBENU_STATS4_MODULE, \
  900. sizeof_field(struct gbenu_hw_stats, field), \
  901. offsetof(struct gbenu_hw_stats, field) \
  902. }
  903. #define GBENU_STATS_P5(field) \
  904. { \
  905. "GBE_P5:"#field, GBENU_STATS5_MODULE, \
  906. sizeof_field(struct gbenu_hw_stats, field), \
  907. offsetof(struct gbenu_hw_stats, field) \
  908. }
  909. #define GBENU_STATS_P6(field) \
  910. { \
  911. "GBE_P6:"#field, GBENU_STATS6_MODULE, \
  912. sizeof_field(struct gbenu_hw_stats, field), \
  913. offsetof(struct gbenu_hw_stats, field) \
  914. }
  915. #define GBENU_STATS_P7(field) \
  916. { \
  917. "GBE_P7:"#field, GBENU_STATS7_MODULE, \
  918. sizeof_field(struct gbenu_hw_stats, field), \
  919. offsetof(struct gbenu_hw_stats, field) \
  920. }
  921. #define GBENU_STATS_P8(field) \
  922. { \
  923. "GBE_P8:"#field, GBENU_STATS8_MODULE, \
  924. sizeof_field(struct gbenu_hw_stats, field), \
  925. offsetof(struct gbenu_hw_stats, field) \
  926. }
  927. static const struct netcp_ethtool_stat gbenu_et_stats[] = {
  928. /* GBENU Host Module */
  929. GBENU_STATS_HOST(rx_good_frames),
  930. GBENU_STATS_HOST(rx_broadcast_frames),
  931. GBENU_STATS_HOST(rx_multicast_frames),
  932. GBENU_STATS_HOST(rx_crc_errors),
  933. GBENU_STATS_HOST(rx_oversized_frames),
  934. GBENU_STATS_HOST(rx_undersized_frames),
  935. GBENU_STATS_HOST(ale_drop),
  936. GBENU_STATS_HOST(ale_overrun_drop),
  937. GBENU_STATS_HOST(rx_bytes),
  938. GBENU_STATS_HOST(tx_good_frames),
  939. GBENU_STATS_HOST(tx_broadcast_frames),
  940. GBENU_STATS_HOST(tx_multicast_frames),
  941. GBENU_STATS_HOST(tx_bytes),
  942. GBENU_STATS_HOST(tx_64B_frames),
  943. GBENU_STATS_HOST(tx_65_to_127B_frames),
  944. GBENU_STATS_HOST(tx_128_to_255B_frames),
  945. GBENU_STATS_HOST(tx_256_to_511B_frames),
  946. GBENU_STATS_HOST(tx_512_to_1023B_frames),
  947. GBENU_STATS_HOST(tx_1024B_frames),
  948. GBENU_STATS_HOST(net_bytes),
  949. GBENU_STATS_HOST(rx_bottom_fifo_drop),
  950. GBENU_STATS_HOST(rx_port_mask_drop),
  951. GBENU_STATS_HOST(rx_top_fifo_drop),
  952. GBENU_STATS_HOST(ale_rate_limit_drop),
  953. GBENU_STATS_HOST(ale_vid_ingress_drop),
  954. GBENU_STATS_HOST(ale_da_eq_sa_drop),
  955. GBENU_STATS_HOST(ale_unknown_ucast),
  956. GBENU_STATS_HOST(ale_unknown_ucast_bytes),
  957. GBENU_STATS_HOST(ale_unknown_mcast),
  958. GBENU_STATS_HOST(ale_unknown_mcast_bytes),
  959. GBENU_STATS_HOST(ale_unknown_bcast),
  960. GBENU_STATS_HOST(ale_unknown_bcast_bytes),
  961. GBENU_STATS_HOST(ale_pol_match),
  962. GBENU_STATS_HOST(ale_pol_match_red),
  963. GBENU_STATS_HOST(ale_pol_match_yellow),
  964. GBENU_STATS_HOST(tx_mem_protect_err),
  965. GBENU_STATS_HOST(tx_pri0_drop),
  966. GBENU_STATS_HOST(tx_pri1_drop),
  967. GBENU_STATS_HOST(tx_pri2_drop),
  968. GBENU_STATS_HOST(tx_pri3_drop),
  969. GBENU_STATS_HOST(tx_pri4_drop),
  970. GBENU_STATS_HOST(tx_pri5_drop),
  971. GBENU_STATS_HOST(tx_pri6_drop),
  972. GBENU_STATS_HOST(tx_pri7_drop),
  973. GBENU_STATS_HOST(tx_pri0_drop_bcnt),
  974. GBENU_STATS_HOST(tx_pri1_drop_bcnt),
  975. GBENU_STATS_HOST(tx_pri2_drop_bcnt),
  976. GBENU_STATS_HOST(tx_pri3_drop_bcnt),
  977. GBENU_STATS_HOST(tx_pri4_drop_bcnt),
  978. GBENU_STATS_HOST(tx_pri5_drop_bcnt),
  979. GBENU_STATS_HOST(tx_pri6_drop_bcnt),
  980. GBENU_STATS_HOST(tx_pri7_drop_bcnt),
  981. /* GBENU Module 1 */
  982. GBENU_STATS_P1(rx_good_frames),
  983. GBENU_STATS_P1(rx_broadcast_frames),
  984. GBENU_STATS_P1(rx_multicast_frames),
  985. GBENU_STATS_P1(rx_pause_frames),
  986. GBENU_STATS_P1(rx_crc_errors),
  987. GBENU_STATS_P1(rx_align_code_errors),
  988. GBENU_STATS_P1(rx_oversized_frames),
  989. GBENU_STATS_P1(rx_jabber_frames),
  990. GBENU_STATS_P1(rx_undersized_frames),
  991. GBENU_STATS_P1(rx_fragments),
  992. GBENU_STATS_P1(ale_drop),
  993. GBENU_STATS_P1(ale_overrun_drop),
  994. GBENU_STATS_P1(rx_bytes),
  995. GBENU_STATS_P1(tx_good_frames),
  996. GBENU_STATS_P1(tx_broadcast_frames),
  997. GBENU_STATS_P1(tx_multicast_frames),
  998. GBENU_STATS_P1(tx_pause_frames),
  999. GBENU_STATS_P1(tx_deferred_frames),
  1000. GBENU_STATS_P1(tx_collision_frames),
  1001. GBENU_STATS_P1(tx_single_coll_frames),
  1002. GBENU_STATS_P1(tx_mult_coll_frames),
  1003. GBENU_STATS_P1(tx_excessive_collisions),
  1004. GBENU_STATS_P1(tx_late_collisions),
  1005. GBENU_STATS_P1(rx_ipg_error),
  1006. GBENU_STATS_P1(tx_carrier_sense_errors),
  1007. GBENU_STATS_P1(tx_bytes),
  1008. GBENU_STATS_P1(tx_64B_frames),
  1009. GBENU_STATS_P1(tx_65_to_127B_frames),
  1010. GBENU_STATS_P1(tx_128_to_255B_frames),
  1011. GBENU_STATS_P1(tx_256_to_511B_frames),
  1012. GBENU_STATS_P1(tx_512_to_1023B_frames),
  1013. GBENU_STATS_P1(tx_1024B_frames),
  1014. GBENU_STATS_P1(net_bytes),
  1015. GBENU_STATS_P1(rx_bottom_fifo_drop),
  1016. GBENU_STATS_P1(rx_port_mask_drop),
  1017. GBENU_STATS_P1(rx_top_fifo_drop),
  1018. GBENU_STATS_P1(ale_rate_limit_drop),
  1019. GBENU_STATS_P1(ale_vid_ingress_drop),
  1020. GBENU_STATS_P1(ale_da_eq_sa_drop),
  1021. GBENU_STATS_P1(ale_unknown_ucast),
  1022. GBENU_STATS_P1(ale_unknown_ucast_bytes),
  1023. GBENU_STATS_P1(ale_unknown_mcast),
  1024. GBENU_STATS_P1(ale_unknown_mcast_bytes),
  1025. GBENU_STATS_P1(ale_unknown_bcast),
  1026. GBENU_STATS_P1(ale_unknown_bcast_bytes),
  1027. GBENU_STATS_P1(ale_pol_match),
  1028. GBENU_STATS_P1(ale_pol_match_red),
  1029. GBENU_STATS_P1(ale_pol_match_yellow),
  1030. GBENU_STATS_P1(tx_mem_protect_err),
  1031. GBENU_STATS_P1(tx_pri0_drop),
  1032. GBENU_STATS_P1(tx_pri1_drop),
  1033. GBENU_STATS_P1(tx_pri2_drop),
  1034. GBENU_STATS_P1(tx_pri3_drop),
  1035. GBENU_STATS_P1(tx_pri4_drop),
  1036. GBENU_STATS_P1(tx_pri5_drop),
  1037. GBENU_STATS_P1(tx_pri6_drop),
  1038. GBENU_STATS_P1(tx_pri7_drop),
  1039. GBENU_STATS_P1(tx_pri0_drop_bcnt),
  1040. GBENU_STATS_P1(tx_pri1_drop_bcnt),
  1041. GBENU_STATS_P1(tx_pri2_drop_bcnt),
  1042. GBENU_STATS_P1(tx_pri3_drop_bcnt),
  1043. GBENU_STATS_P1(tx_pri4_drop_bcnt),
  1044. GBENU_STATS_P1(tx_pri5_drop_bcnt),
  1045. GBENU_STATS_P1(tx_pri6_drop_bcnt),
  1046. GBENU_STATS_P1(tx_pri7_drop_bcnt),
  1047. /* GBENU Module 2 */
  1048. GBENU_STATS_P2(rx_good_frames),
  1049. GBENU_STATS_P2(rx_broadcast_frames),
  1050. GBENU_STATS_P2(rx_multicast_frames),
  1051. GBENU_STATS_P2(rx_pause_frames),
  1052. GBENU_STATS_P2(rx_crc_errors),
  1053. GBENU_STATS_P2(rx_align_code_errors),
  1054. GBENU_STATS_P2(rx_oversized_frames),
  1055. GBENU_STATS_P2(rx_jabber_frames),
  1056. GBENU_STATS_P2(rx_undersized_frames),
  1057. GBENU_STATS_P2(rx_fragments),
  1058. GBENU_STATS_P2(ale_drop),
  1059. GBENU_STATS_P2(ale_overrun_drop),
  1060. GBENU_STATS_P2(rx_bytes),
  1061. GBENU_STATS_P2(tx_good_frames),
  1062. GBENU_STATS_P2(tx_broadcast_frames),
  1063. GBENU_STATS_P2(tx_multicast_frames),
  1064. GBENU_STATS_P2(tx_pause_frames),
  1065. GBENU_STATS_P2(tx_deferred_frames),
  1066. GBENU_STATS_P2(tx_collision_frames),
  1067. GBENU_STATS_P2(tx_single_coll_frames),
  1068. GBENU_STATS_P2(tx_mult_coll_frames),
  1069. GBENU_STATS_P2(tx_excessive_collisions),
  1070. GBENU_STATS_P2(tx_late_collisions),
  1071. GBENU_STATS_P2(rx_ipg_error),
  1072. GBENU_STATS_P2(tx_carrier_sense_errors),
  1073. GBENU_STATS_P2(tx_bytes),
  1074. GBENU_STATS_P2(tx_64B_frames),
  1075. GBENU_STATS_P2(tx_65_to_127B_frames),
  1076. GBENU_STATS_P2(tx_128_to_255B_frames),
  1077. GBENU_STATS_P2(tx_256_to_511B_frames),
  1078. GBENU_STATS_P2(tx_512_to_1023B_frames),
  1079. GBENU_STATS_P2(tx_1024B_frames),
  1080. GBENU_STATS_P2(net_bytes),
  1081. GBENU_STATS_P2(rx_bottom_fifo_drop),
  1082. GBENU_STATS_P2(rx_port_mask_drop),
  1083. GBENU_STATS_P2(rx_top_fifo_drop),
  1084. GBENU_STATS_P2(ale_rate_limit_drop),
  1085. GBENU_STATS_P2(ale_vid_ingress_drop),
  1086. GBENU_STATS_P2(ale_da_eq_sa_drop),
  1087. GBENU_STATS_P2(ale_unknown_ucast),
  1088. GBENU_STATS_P2(ale_unknown_ucast_bytes),
  1089. GBENU_STATS_P2(ale_unknown_mcast),
  1090. GBENU_STATS_P2(ale_unknown_mcast_bytes),
  1091. GBENU_STATS_P2(ale_unknown_bcast),
  1092. GBENU_STATS_P2(ale_unknown_bcast_bytes),
  1093. GBENU_STATS_P2(ale_pol_match),
  1094. GBENU_STATS_P2(ale_pol_match_red),
  1095. GBENU_STATS_P2(ale_pol_match_yellow),
  1096. GBENU_STATS_P2(tx_mem_protect_err),
  1097. GBENU_STATS_P2(tx_pri0_drop),
  1098. GBENU_STATS_P2(tx_pri1_drop),
  1099. GBENU_STATS_P2(tx_pri2_drop),
  1100. GBENU_STATS_P2(tx_pri3_drop),
  1101. GBENU_STATS_P2(tx_pri4_drop),
  1102. GBENU_STATS_P2(tx_pri5_drop),
  1103. GBENU_STATS_P2(tx_pri6_drop),
  1104. GBENU_STATS_P2(tx_pri7_drop),
  1105. GBENU_STATS_P2(tx_pri0_drop_bcnt),
  1106. GBENU_STATS_P2(tx_pri1_drop_bcnt),
  1107. GBENU_STATS_P2(tx_pri2_drop_bcnt),
  1108. GBENU_STATS_P2(tx_pri3_drop_bcnt),
  1109. GBENU_STATS_P2(tx_pri4_drop_bcnt),
  1110. GBENU_STATS_P2(tx_pri5_drop_bcnt),
  1111. GBENU_STATS_P2(tx_pri6_drop_bcnt),
  1112. GBENU_STATS_P2(tx_pri7_drop_bcnt),
  1113. /* GBENU Module 3 */
  1114. GBENU_STATS_P3(rx_good_frames),
  1115. GBENU_STATS_P3(rx_broadcast_frames),
  1116. GBENU_STATS_P3(rx_multicast_frames),
  1117. GBENU_STATS_P3(rx_pause_frames),
  1118. GBENU_STATS_P3(rx_crc_errors),
  1119. GBENU_STATS_P3(rx_align_code_errors),
  1120. GBENU_STATS_P3(rx_oversized_frames),
  1121. GBENU_STATS_P3(rx_jabber_frames),
  1122. GBENU_STATS_P3(rx_undersized_frames),
  1123. GBENU_STATS_P3(rx_fragments),
  1124. GBENU_STATS_P3(ale_drop),
  1125. GBENU_STATS_P3(ale_overrun_drop),
  1126. GBENU_STATS_P3(rx_bytes),
  1127. GBENU_STATS_P3(tx_good_frames),
  1128. GBENU_STATS_P3(tx_broadcast_frames),
  1129. GBENU_STATS_P3(tx_multicast_frames),
  1130. GBENU_STATS_P3(tx_pause_frames),
  1131. GBENU_STATS_P3(tx_deferred_frames),
  1132. GBENU_STATS_P3(tx_collision_frames),
  1133. GBENU_STATS_P3(tx_single_coll_frames),
  1134. GBENU_STATS_P3(tx_mult_coll_frames),
  1135. GBENU_STATS_P3(tx_excessive_collisions),
  1136. GBENU_STATS_P3(tx_late_collisions),
  1137. GBENU_STATS_P3(rx_ipg_error),
  1138. GBENU_STATS_P3(tx_carrier_sense_errors),
  1139. GBENU_STATS_P3(tx_bytes),
  1140. GBENU_STATS_P3(tx_64B_frames),
  1141. GBENU_STATS_P3(tx_65_to_127B_frames),
  1142. GBENU_STATS_P3(tx_128_to_255B_frames),
  1143. GBENU_STATS_P3(tx_256_to_511B_frames),
  1144. GBENU_STATS_P3(tx_512_to_1023B_frames),
  1145. GBENU_STATS_P3(tx_1024B_frames),
  1146. GBENU_STATS_P3(net_bytes),
  1147. GBENU_STATS_P3(rx_bottom_fifo_drop),
  1148. GBENU_STATS_P3(rx_port_mask_drop),
  1149. GBENU_STATS_P3(rx_top_fifo_drop),
  1150. GBENU_STATS_P3(ale_rate_limit_drop),
  1151. GBENU_STATS_P3(ale_vid_ingress_drop),
  1152. GBENU_STATS_P3(ale_da_eq_sa_drop),
  1153. GBENU_STATS_P3(ale_unknown_ucast),
  1154. GBENU_STATS_P3(ale_unknown_ucast_bytes),
  1155. GBENU_STATS_P3(ale_unknown_mcast),
  1156. GBENU_STATS_P3(ale_unknown_mcast_bytes),
  1157. GBENU_STATS_P3(ale_unknown_bcast),
  1158. GBENU_STATS_P3(ale_unknown_bcast_bytes),
  1159. GBENU_STATS_P3(ale_pol_match),
  1160. GBENU_STATS_P3(ale_pol_match_red),
  1161. GBENU_STATS_P3(ale_pol_match_yellow),
  1162. GBENU_STATS_P3(tx_mem_protect_err),
  1163. GBENU_STATS_P3(tx_pri0_drop),
  1164. GBENU_STATS_P3(tx_pri1_drop),
  1165. GBENU_STATS_P3(tx_pri2_drop),
  1166. GBENU_STATS_P3(tx_pri3_drop),
  1167. GBENU_STATS_P3(tx_pri4_drop),
  1168. GBENU_STATS_P3(tx_pri5_drop),
  1169. GBENU_STATS_P3(tx_pri6_drop),
  1170. GBENU_STATS_P3(tx_pri7_drop),
  1171. GBENU_STATS_P3(tx_pri0_drop_bcnt),
  1172. GBENU_STATS_P3(tx_pri1_drop_bcnt),
  1173. GBENU_STATS_P3(tx_pri2_drop_bcnt),
  1174. GBENU_STATS_P3(tx_pri3_drop_bcnt),
  1175. GBENU_STATS_P3(tx_pri4_drop_bcnt),
  1176. GBENU_STATS_P3(tx_pri5_drop_bcnt),
  1177. GBENU_STATS_P3(tx_pri6_drop_bcnt),
  1178. GBENU_STATS_P3(tx_pri7_drop_bcnt),
  1179. /* GBENU Module 4 */
  1180. GBENU_STATS_P4(rx_good_frames),
  1181. GBENU_STATS_P4(rx_broadcast_frames),
  1182. GBENU_STATS_P4(rx_multicast_frames),
  1183. GBENU_STATS_P4(rx_pause_frames),
  1184. GBENU_STATS_P4(rx_crc_errors),
  1185. GBENU_STATS_P4(rx_align_code_errors),
  1186. GBENU_STATS_P4(rx_oversized_frames),
  1187. GBENU_STATS_P4(rx_jabber_frames),
  1188. GBENU_STATS_P4(rx_undersized_frames),
  1189. GBENU_STATS_P4(rx_fragments),
  1190. GBENU_STATS_P4(ale_drop),
  1191. GBENU_STATS_P4(ale_overrun_drop),
  1192. GBENU_STATS_P4(rx_bytes),
  1193. GBENU_STATS_P4(tx_good_frames),
  1194. GBENU_STATS_P4(tx_broadcast_frames),
  1195. GBENU_STATS_P4(tx_multicast_frames),
  1196. GBENU_STATS_P4(tx_pause_frames),
  1197. GBENU_STATS_P4(tx_deferred_frames),
  1198. GBENU_STATS_P4(tx_collision_frames),
  1199. GBENU_STATS_P4(tx_single_coll_frames),
  1200. GBENU_STATS_P4(tx_mult_coll_frames),
  1201. GBENU_STATS_P4(tx_excessive_collisions),
  1202. GBENU_STATS_P4(tx_late_collisions),
  1203. GBENU_STATS_P4(rx_ipg_error),
  1204. GBENU_STATS_P4(tx_carrier_sense_errors),
  1205. GBENU_STATS_P4(tx_bytes),
  1206. GBENU_STATS_P4(tx_64B_frames),
  1207. GBENU_STATS_P4(tx_65_to_127B_frames),
  1208. GBENU_STATS_P4(tx_128_to_255B_frames),
  1209. GBENU_STATS_P4(tx_256_to_511B_frames),
  1210. GBENU_STATS_P4(tx_512_to_1023B_frames),
  1211. GBENU_STATS_P4(tx_1024B_frames),
  1212. GBENU_STATS_P4(net_bytes),
  1213. GBENU_STATS_P4(rx_bottom_fifo_drop),
  1214. GBENU_STATS_P4(rx_port_mask_drop),
  1215. GBENU_STATS_P4(rx_top_fifo_drop),
  1216. GBENU_STATS_P4(ale_rate_limit_drop),
  1217. GBENU_STATS_P4(ale_vid_ingress_drop),
  1218. GBENU_STATS_P4(ale_da_eq_sa_drop),
  1219. GBENU_STATS_P4(ale_unknown_ucast),
  1220. GBENU_STATS_P4(ale_unknown_ucast_bytes),
  1221. GBENU_STATS_P4(ale_unknown_mcast),
  1222. GBENU_STATS_P4(ale_unknown_mcast_bytes),
  1223. GBENU_STATS_P4(ale_unknown_bcast),
  1224. GBENU_STATS_P4(ale_unknown_bcast_bytes),
  1225. GBENU_STATS_P4(ale_pol_match),
  1226. GBENU_STATS_P4(ale_pol_match_red),
  1227. GBENU_STATS_P4(ale_pol_match_yellow),
  1228. GBENU_STATS_P4(tx_mem_protect_err),
  1229. GBENU_STATS_P4(tx_pri0_drop),
  1230. GBENU_STATS_P4(tx_pri1_drop),
  1231. GBENU_STATS_P4(tx_pri2_drop),
  1232. GBENU_STATS_P4(tx_pri3_drop),
  1233. GBENU_STATS_P4(tx_pri4_drop),
  1234. GBENU_STATS_P4(tx_pri5_drop),
  1235. GBENU_STATS_P4(tx_pri6_drop),
  1236. GBENU_STATS_P4(tx_pri7_drop),
  1237. GBENU_STATS_P4(tx_pri0_drop_bcnt),
  1238. GBENU_STATS_P4(tx_pri1_drop_bcnt),
  1239. GBENU_STATS_P4(tx_pri2_drop_bcnt),
  1240. GBENU_STATS_P4(tx_pri3_drop_bcnt),
  1241. GBENU_STATS_P4(tx_pri4_drop_bcnt),
  1242. GBENU_STATS_P4(tx_pri5_drop_bcnt),
  1243. GBENU_STATS_P4(tx_pri6_drop_bcnt),
  1244. GBENU_STATS_P4(tx_pri7_drop_bcnt),
  1245. /* GBENU Module 5 */
  1246. GBENU_STATS_P5(rx_good_frames),
  1247. GBENU_STATS_P5(rx_broadcast_frames),
  1248. GBENU_STATS_P5(rx_multicast_frames),
  1249. GBENU_STATS_P5(rx_pause_frames),
  1250. GBENU_STATS_P5(rx_crc_errors),
  1251. GBENU_STATS_P5(rx_align_code_errors),
  1252. GBENU_STATS_P5(rx_oversized_frames),
  1253. GBENU_STATS_P5(rx_jabber_frames),
  1254. GBENU_STATS_P5(rx_undersized_frames),
  1255. GBENU_STATS_P5(rx_fragments),
  1256. GBENU_STATS_P5(ale_drop),
  1257. GBENU_STATS_P5(ale_overrun_drop),
  1258. GBENU_STATS_P5(rx_bytes),
  1259. GBENU_STATS_P5(tx_good_frames),
  1260. GBENU_STATS_P5(tx_broadcast_frames),
  1261. GBENU_STATS_P5(tx_multicast_frames),
  1262. GBENU_STATS_P5(tx_pause_frames),
  1263. GBENU_STATS_P5(tx_deferred_frames),
  1264. GBENU_STATS_P5(tx_collision_frames),
  1265. GBENU_STATS_P5(tx_single_coll_frames),
  1266. GBENU_STATS_P5(tx_mult_coll_frames),
  1267. GBENU_STATS_P5(tx_excessive_collisions),
  1268. GBENU_STATS_P5(tx_late_collisions),
  1269. GBENU_STATS_P5(rx_ipg_error),
  1270. GBENU_STATS_P5(tx_carrier_sense_errors),
  1271. GBENU_STATS_P5(tx_bytes),
  1272. GBENU_STATS_P5(tx_64B_frames),
  1273. GBENU_STATS_P5(tx_65_to_127B_frames),
  1274. GBENU_STATS_P5(tx_128_to_255B_frames),
  1275. GBENU_STATS_P5(tx_256_to_511B_frames),
  1276. GBENU_STATS_P5(tx_512_to_1023B_frames),
  1277. GBENU_STATS_P5(tx_1024B_frames),
  1278. GBENU_STATS_P5(net_bytes),
  1279. GBENU_STATS_P5(rx_bottom_fifo_drop),
  1280. GBENU_STATS_P5(rx_port_mask_drop),
  1281. GBENU_STATS_P5(rx_top_fifo_drop),
  1282. GBENU_STATS_P5(ale_rate_limit_drop),
  1283. GBENU_STATS_P5(ale_vid_ingress_drop),
  1284. GBENU_STATS_P5(ale_da_eq_sa_drop),
  1285. GBENU_STATS_P5(ale_unknown_ucast),
  1286. GBENU_STATS_P5(ale_unknown_ucast_bytes),
  1287. GBENU_STATS_P5(ale_unknown_mcast),
  1288. GBENU_STATS_P5(ale_unknown_mcast_bytes),
  1289. GBENU_STATS_P5(ale_unknown_bcast),
  1290. GBENU_STATS_P5(ale_unknown_bcast_bytes),
  1291. GBENU_STATS_P5(ale_pol_match),
  1292. GBENU_STATS_P5(ale_pol_match_red),
  1293. GBENU_STATS_P5(ale_pol_match_yellow),
  1294. GBENU_STATS_P5(tx_mem_protect_err),
  1295. GBENU_STATS_P5(tx_pri0_drop),
  1296. GBENU_STATS_P5(tx_pri1_drop),
  1297. GBENU_STATS_P5(tx_pri2_drop),
  1298. GBENU_STATS_P5(tx_pri3_drop),
  1299. GBENU_STATS_P5(tx_pri4_drop),
  1300. GBENU_STATS_P5(tx_pri5_drop),
  1301. GBENU_STATS_P5(tx_pri6_drop),
  1302. GBENU_STATS_P5(tx_pri7_drop),
  1303. GBENU_STATS_P5(tx_pri0_drop_bcnt),
  1304. GBENU_STATS_P5(tx_pri1_drop_bcnt),
  1305. GBENU_STATS_P5(tx_pri2_drop_bcnt),
  1306. GBENU_STATS_P5(tx_pri3_drop_bcnt),
  1307. GBENU_STATS_P5(tx_pri4_drop_bcnt),
  1308. GBENU_STATS_P5(tx_pri5_drop_bcnt),
  1309. GBENU_STATS_P5(tx_pri6_drop_bcnt),
  1310. GBENU_STATS_P5(tx_pri7_drop_bcnt),
  1311. /* GBENU Module 6 */
  1312. GBENU_STATS_P6(rx_good_frames),
  1313. GBENU_STATS_P6(rx_broadcast_frames),
  1314. GBENU_STATS_P6(rx_multicast_frames),
  1315. GBENU_STATS_P6(rx_pause_frames),
  1316. GBENU_STATS_P6(rx_crc_errors),
  1317. GBENU_STATS_P6(rx_align_code_errors),
  1318. GBENU_STATS_P6(rx_oversized_frames),
  1319. GBENU_STATS_P6(rx_jabber_frames),
  1320. GBENU_STATS_P6(rx_undersized_frames),
  1321. GBENU_STATS_P6(rx_fragments),
  1322. GBENU_STATS_P6(ale_drop),
  1323. GBENU_STATS_P6(ale_overrun_drop),
  1324. GBENU_STATS_P6(rx_bytes),
  1325. GBENU_STATS_P6(tx_good_frames),
  1326. GBENU_STATS_P6(tx_broadcast_frames),
  1327. GBENU_STATS_P6(tx_multicast_frames),
  1328. GBENU_STATS_P6(tx_pause_frames),
  1329. GBENU_STATS_P6(tx_deferred_frames),
  1330. GBENU_STATS_P6(tx_collision_frames),
  1331. GBENU_STATS_P6(tx_single_coll_frames),
  1332. GBENU_STATS_P6(tx_mult_coll_frames),
  1333. GBENU_STATS_P6(tx_excessive_collisions),
  1334. GBENU_STATS_P6(tx_late_collisions),
  1335. GBENU_STATS_P6(rx_ipg_error),
  1336. GBENU_STATS_P6(tx_carrier_sense_errors),
  1337. GBENU_STATS_P6(tx_bytes),
  1338. GBENU_STATS_P6(tx_64B_frames),
  1339. GBENU_STATS_P6(tx_65_to_127B_frames),
  1340. GBENU_STATS_P6(tx_128_to_255B_frames),
  1341. GBENU_STATS_P6(tx_256_to_511B_frames),
  1342. GBENU_STATS_P6(tx_512_to_1023B_frames),
  1343. GBENU_STATS_P6(tx_1024B_frames),
  1344. GBENU_STATS_P6(net_bytes),
  1345. GBENU_STATS_P6(rx_bottom_fifo_drop),
  1346. GBENU_STATS_P6(rx_port_mask_drop),
  1347. GBENU_STATS_P6(rx_top_fifo_drop),
  1348. GBENU_STATS_P6(ale_rate_limit_drop),
  1349. GBENU_STATS_P6(ale_vid_ingress_drop),
  1350. GBENU_STATS_P6(ale_da_eq_sa_drop),
  1351. GBENU_STATS_P6(ale_unknown_ucast),
  1352. GBENU_STATS_P6(ale_unknown_ucast_bytes),
  1353. GBENU_STATS_P6(ale_unknown_mcast),
  1354. GBENU_STATS_P6(ale_unknown_mcast_bytes),
  1355. GBENU_STATS_P6(ale_unknown_bcast),
  1356. GBENU_STATS_P6(ale_unknown_bcast_bytes),
  1357. GBENU_STATS_P6(ale_pol_match),
  1358. GBENU_STATS_P6(ale_pol_match_red),
  1359. GBENU_STATS_P6(ale_pol_match_yellow),
  1360. GBENU_STATS_P6(tx_mem_protect_err),
  1361. GBENU_STATS_P6(tx_pri0_drop),
  1362. GBENU_STATS_P6(tx_pri1_drop),
  1363. GBENU_STATS_P6(tx_pri2_drop),
  1364. GBENU_STATS_P6(tx_pri3_drop),
  1365. GBENU_STATS_P6(tx_pri4_drop),
  1366. GBENU_STATS_P6(tx_pri5_drop),
  1367. GBENU_STATS_P6(tx_pri6_drop),
  1368. GBENU_STATS_P6(tx_pri7_drop),
  1369. GBENU_STATS_P6(tx_pri0_drop_bcnt),
  1370. GBENU_STATS_P6(tx_pri1_drop_bcnt),
  1371. GBENU_STATS_P6(tx_pri2_drop_bcnt),
  1372. GBENU_STATS_P6(tx_pri3_drop_bcnt),
  1373. GBENU_STATS_P6(tx_pri4_drop_bcnt),
  1374. GBENU_STATS_P6(tx_pri5_drop_bcnt),
  1375. GBENU_STATS_P6(tx_pri6_drop_bcnt),
  1376. GBENU_STATS_P6(tx_pri7_drop_bcnt),
  1377. /* GBENU Module 7 */
  1378. GBENU_STATS_P7(rx_good_frames),
  1379. GBENU_STATS_P7(rx_broadcast_frames),
  1380. GBENU_STATS_P7(rx_multicast_frames),
  1381. GBENU_STATS_P7(rx_pause_frames),
  1382. GBENU_STATS_P7(rx_crc_errors),
  1383. GBENU_STATS_P7(rx_align_code_errors),
  1384. GBENU_STATS_P7(rx_oversized_frames),
  1385. GBENU_STATS_P7(rx_jabber_frames),
  1386. GBENU_STATS_P7(rx_undersized_frames),
  1387. GBENU_STATS_P7(rx_fragments),
  1388. GBENU_STATS_P7(ale_drop),
  1389. GBENU_STATS_P7(ale_overrun_drop),
  1390. GBENU_STATS_P7(rx_bytes),
  1391. GBENU_STATS_P7(tx_good_frames),
  1392. GBENU_STATS_P7(tx_broadcast_frames),
  1393. GBENU_STATS_P7(tx_multicast_frames),
  1394. GBENU_STATS_P7(tx_pause_frames),
  1395. GBENU_STATS_P7(tx_deferred_frames),
  1396. GBENU_STATS_P7(tx_collision_frames),
  1397. GBENU_STATS_P7(tx_single_coll_frames),
  1398. GBENU_STATS_P7(tx_mult_coll_frames),
  1399. GBENU_STATS_P7(tx_excessive_collisions),
  1400. GBENU_STATS_P7(tx_late_collisions),
  1401. GBENU_STATS_P7(rx_ipg_error),
  1402. GBENU_STATS_P7(tx_carrier_sense_errors),
  1403. GBENU_STATS_P7(tx_bytes),
  1404. GBENU_STATS_P7(tx_64B_frames),
  1405. GBENU_STATS_P7(tx_65_to_127B_frames),
  1406. GBENU_STATS_P7(tx_128_to_255B_frames),
  1407. GBENU_STATS_P7(tx_256_to_511B_frames),
  1408. GBENU_STATS_P7(tx_512_to_1023B_frames),
  1409. GBENU_STATS_P7(tx_1024B_frames),
  1410. GBENU_STATS_P7(net_bytes),
  1411. GBENU_STATS_P7(rx_bottom_fifo_drop),
  1412. GBENU_STATS_P7(rx_port_mask_drop),
  1413. GBENU_STATS_P7(rx_top_fifo_drop),
  1414. GBENU_STATS_P7(ale_rate_limit_drop),
  1415. GBENU_STATS_P7(ale_vid_ingress_drop),
  1416. GBENU_STATS_P7(ale_da_eq_sa_drop),
  1417. GBENU_STATS_P7(ale_unknown_ucast),
  1418. GBENU_STATS_P7(ale_unknown_ucast_bytes),
  1419. GBENU_STATS_P7(ale_unknown_mcast),
  1420. GBENU_STATS_P7(ale_unknown_mcast_bytes),
  1421. GBENU_STATS_P7(ale_unknown_bcast),
  1422. GBENU_STATS_P7(ale_unknown_bcast_bytes),
  1423. GBENU_STATS_P7(ale_pol_match),
  1424. GBENU_STATS_P7(ale_pol_match_red),
  1425. GBENU_STATS_P7(ale_pol_match_yellow),
  1426. GBENU_STATS_P7(tx_mem_protect_err),
  1427. GBENU_STATS_P7(tx_pri0_drop),
  1428. GBENU_STATS_P7(tx_pri1_drop),
  1429. GBENU_STATS_P7(tx_pri2_drop),
  1430. GBENU_STATS_P7(tx_pri3_drop),
  1431. GBENU_STATS_P7(tx_pri4_drop),
  1432. GBENU_STATS_P7(tx_pri5_drop),
  1433. GBENU_STATS_P7(tx_pri6_drop),
  1434. GBENU_STATS_P7(tx_pri7_drop),
  1435. GBENU_STATS_P7(tx_pri0_drop_bcnt),
  1436. GBENU_STATS_P7(tx_pri1_drop_bcnt),
  1437. GBENU_STATS_P7(tx_pri2_drop_bcnt),
  1438. GBENU_STATS_P7(tx_pri3_drop_bcnt),
  1439. GBENU_STATS_P7(tx_pri4_drop_bcnt),
  1440. GBENU_STATS_P7(tx_pri5_drop_bcnt),
  1441. GBENU_STATS_P7(tx_pri6_drop_bcnt),
  1442. GBENU_STATS_P7(tx_pri7_drop_bcnt),
  1443. /* GBENU Module 8 */
  1444. GBENU_STATS_P8(rx_good_frames),
  1445. GBENU_STATS_P8(rx_broadcast_frames),
  1446. GBENU_STATS_P8(rx_multicast_frames),
  1447. GBENU_STATS_P8(rx_pause_frames),
  1448. GBENU_STATS_P8(rx_crc_errors),
  1449. GBENU_STATS_P8(rx_align_code_errors),
  1450. GBENU_STATS_P8(rx_oversized_frames),
  1451. GBENU_STATS_P8(rx_jabber_frames),
  1452. GBENU_STATS_P8(rx_undersized_frames),
  1453. GBENU_STATS_P8(rx_fragments),
  1454. GBENU_STATS_P8(ale_drop),
  1455. GBENU_STATS_P8(ale_overrun_drop),
  1456. GBENU_STATS_P8(rx_bytes),
  1457. GBENU_STATS_P8(tx_good_frames),
  1458. GBENU_STATS_P8(tx_broadcast_frames),
  1459. GBENU_STATS_P8(tx_multicast_frames),
  1460. GBENU_STATS_P8(tx_pause_frames),
  1461. GBENU_STATS_P8(tx_deferred_frames),
  1462. GBENU_STATS_P8(tx_collision_frames),
  1463. GBENU_STATS_P8(tx_single_coll_frames),
  1464. GBENU_STATS_P8(tx_mult_coll_frames),
  1465. GBENU_STATS_P8(tx_excessive_collisions),
  1466. GBENU_STATS_P8(tx_late_collisions),
  1467. GBENU_STATS_P8(rx_ipg_error),
  1468. GBENU_STATS_P8(tx_carrier_sense_errors),
  1469. GBENU_STATS_P8(tx_bytes),
  1470. GBENU_STATS_P8(tx_64B_frames),
  1471. GBENU_STATS_P8(tx_65_to_127B_frames),
  1472. GBENU_STATS_P8(tx_128_to_255B_frames),
  1473. GBENU_STATS_P8(tx_256_to_511B_frames),
  1474. GBENU_STATS_P8(tx_512_to_1023B_frames),
  1475. GBENU_STATS_P8(tx_1024B_frames),
  1476. GBENU_STATS_P8(net_bytes),
  1477. GBENU_STATS_P8(rx_bottom_fifo_drop),
  1478. GBENU_STATS_P8(rx_port_mask_drop),
  1479. GBENU_STATS_P8(rx_top_fifo_drop),
  1480. GBENU_STATS_P8(ale_rate_limit_drop),
  1481. GBENU_STATS_P8(ale_vid_ingress_drop),
  1482. GBENU_STATS_P8(ale_da_eq_sa_drop),
  1483. GBENU_STATS_P8(ale_unknown_ucast),
  1484. GBENU_STATS_P8(ale_unknown_ucast_bytes),
  1485. GBENU_STATS_P8(ale_unknown_mcast),
  1486. GBENU_STATS_P8(ale_unknown_mcast_bytes),
  1487. GBENU_STATS_P8(ale_unknown_bcast),
  1488. GBENU_STATS_P8(ale_unknown_bcast_bytes),
  1489. GBENU_STATS_P8(ale_pol_match),
  1490. GBENU_STATS_P8(ale_pol_match_red),
  1491. GBENU_STATS_P8(ale_pol_match_yellow),
  1492. GBENU_STATS_P8(tx_mem_protect_err),
  1493. GBENU_STATS_P8(tx_pri0_drop),
  1494. GBENU_STATS_P8(tx_pri1_drop),
  1495. GBENU_STATS_P8(tx_pri2_drop),
  1496. GBENU_STATS_P8(tx_pri3_drop),
  1497. GBENU_STATS_P8(tx_pri4_drop),
  1498. GBENU_STATS_P8(tx_pri5_drop),
  1499. GBENU_STATS_P8(tx_pri6_drop),
  1500. GBENU_STATS_P8(tx_pri7_drop),
  1501. GBENU_STATS_P8(tx_pri0_drop_bcnt),
  1502. GBENU_STATS_P8(tx_pri1_drop_bcnt),
  1503. GBENU_STATS_P8(tx_pri2_drop_bcnt),
  1504. GBENU_STATS_P8(tx_pri3_drop_bcnt),
  1505. GBENU_STATS_P8(tx_pri4_drop_bcnt),
  1506. GBENU_STATS_P8(tx_pri5_drop_bcnt),
  1507. GBENU_STATS_P8(tx_pri6_drop_bcnt),
  1508. GBENU_STATS_P8(tx_pri7_drop_bcnt),
  1509. };
  1510. #define XGBE_STATS0_INFO(field) \
  1511. { \
  1512. "GBE_0:"#field, XGBE_STATS0_MODULE, \
  1513. sizeof_field(struct xgbe_hw_stats, field), \
  1514. offsetof(struct xgbe_hw_stats, field) \
  1515. }
  1516. #define XGBE_STATS1_INFO(field) \
  1517. { \
  1518. "GBE_1:"#field, XGBE_STATS1_MODULE, \
  1519. sizeof_field(struct xgbe_hw_stats, field), \
  1520. offsetof(struct xgbe_hw_stats, field) \
  1521. }
  1522. #define XGBE_STATS2_INFO(field) \
  1523. { \
  1524. "GBE_2:"#field, XGBE_STATS2_MODULE, \
  1525. sizeof_field(struct xgbe_hw_stats, field), \
  1526. offsetof(struct xgbe_hw_stats, field) \
  1527. }
  1528. static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
  1529. /* GBE module 0 */
  1530. XGBE_STATS0_INFO(rx_good_frames),
  1531. XGBE_STATS0_INFO(rx_broadcast_frames),
  1532. XGBE_STATS0_INFO(rx_multicast_frames),
  1533. XGBE_STATS0_INFO(rx_oversized_frames),
  1534. XGBE_STATS0_INFO(rx_undersized_frames),
  1535. XGBE_STATS0_INFO(overrun_type4),
  1536. XGBE_STATS0_INFO(overrun_type5),
  1537. XGBE_STATS0_INFO(rx_bytes),
  1538. XGBE_STATS0_INFO(tx_good_frames),
  1539. XGBE_STATS0_INFO(tx_broadcast_frames),
  1540. XGBE_STATS0_INFO(tx_multicast_frames),
  1541. XGBE_STATS0_INFO(tx_bytes),
  1542. XGBE_STATS0_INFO(tx_64byte_frames),
  1543. XGBE_STATS0_INFO(tx_65_to_127byte_frames),
  1544. XGBE_STATS0_INFO(tx_128_to_255byte_frames),
  1545. XGBE_STATS0_INFO(tx_256_to_511byte_frames),
  1546. XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
  1547. XGBE_STATS0_INFO(tx_1024byte_frames),
  1548. XGBE_STATS0_INFO(net_bytes),
  1549. XGBE_STATS0_INFO(rx_sof_overruns),
  1550. XGBE_STATS0_INFO(rx_mof_overruns),
  1551. XGBE_STATS0_INFO(rx_dma_overruns),
  1552. /* XGBE module 1 */
  1553. XGBE_STATS1_INFO(rx_good_frames),
  1554. XGBE_STATS1_INFO(rx_broadcast_frames),
  1555. XGBE_STATS1_INFO(rx_multicast_frames),
  1556. XGBE_STATS1_INFO(rx_pause_frames),
  1557. XGBE_STATS1_INFO(rx_crc_errors),
  1558. XGBE_STATS1_INFO(rx_align_code_errors),
  1559. XGBE_STATS1_INFO(rx_oversized_frames),
  1560. XGBE_STATS1_INFO(rx_jabber_frames),
  1561. XGBE_STATS1_INFO(rx_undersized_frames),
  1562. XGBE_STATS1_INFO(rx_fragments),
  1563. XGBE_STATS1_INFO(overrun_type4),
  1564. XGBE_STATS1_INFO(overrun_type5),
  1565. XGBE_STATS1_INFO(rx_bytes),
  1566. XGBE_STATS1_INFO(tx_good_frames),
  1567. XGBE_STATS1_INFO(tx_broadcast_frames),
  1568. XGBE_STATS1_INFO(tx_multicast_frames),
  1569. XGBE_STATS1_INFO(tx_pause_frames),
  1570. XGBE_STATS1_INFO(tx_deferred_frames),
  1571. XGBE_STATS1_INFO(tx_collision_frames),
  1572. XGBE_STATS1_INFO(tx_single_coll_frames),
  1573. XGBE_STATS1_INFO(tx_mult_coll_frames),
  1574. XGBE_STATS1_INFO(tx_excessive_collisions),
  1575. XGBE_STATS1_INFO(tx_late_collisions),
  1576. XGBE_STATS1_INFO(tx_underrun),
  1577. XGBE_STATS1_INFO(tx_carrier_sense_errors),
  1578. XGBE_STATS1_INFO(tx_bytes),
  1579. XGBE_STATS1_INFO(tx_64byte_frames),
  1580. XGBE_STATS1_INFO(tx_65_to_127byte_frames),
  1581. XGBE_STATS1_INFO(tx_128_to_255byte_frames),
  1582. XGBE_STATS1_INFO(tx_256_to_511byte_frames),
  1583. XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
  1584. XGBE_STATS1_INFO(tx_1024byte_frames),
  1585. XGBE_STATS1_INFO(net_bytes),
  1586. XGBE_STATS1_INFO(rx_sof_overruns),
  1587. XGBE_STATS1_INFO(rx_mof_overruns),
  1588. XGBE_STATS1_INFO(rx_dma_overruns),
  1589. /* XGBE module 2 */
  1590. XGBE_STATS2_INFO(rx_good_frames),
  1591. XGBE_STATS2_INFO(rx_broadcast_frames),
  1592. XGBE_STATS2_INFO(rx_multicast_frames),
  1593. XGBE_STATS2_INFO(rx_pause_frames),
  1594. XGBE_STATS2_INFO(rx_crc_errors),
  1595. XGBE_STATS2_INFO(rx_align_code_errors),
  1596. XGBE_STATS2_INFO(rx_oversized_frames),
  1597. XGBE_STATS2_INFO(rx_jabber_frames),
  1598. XGBE_STATS2_INFO(rx_undersized_frames),
  1599. XGBE_STATS2_INFO(rx_fragments),
  1600. XGBE_STATS2_INFO(overrun_type4),
  1601. XGBE_STATS2_INFO(overrun_type5),
  1602. XGBE_STATS2_INFO(rx_bytes),
  1603. XGBE_STATS2_INFO(tx_good_frames),
  1604. XGBE_STATS2_INFO(tx_broadcast_frames),
  1605. XGBE_STATS2_INFO(tx_multicast_frames),
  1606. XGBE_STATS2_INFO(tx_pause_frames),
  1607. XGBE_STATS2_INFO(tx_deferred_frames),
  1608. XGBE_STATS2_INFO(tx_collision_frames),
  1609. XGBE_STATS2_INFO(tx_single_coll_frames),
  1610. XGBE_STATS2_INFO(tx_mult_coll_frames),
  1611. XGBE_STATS2_INFO(tx_excessive_collisions),
  1612. XGBE_STATS2_INFO(tx_late_collisions),
  1613. XGBE_STATS2_INFO(tx_underrun),
  1614. XGBE_STATS2_INFO(tx_carrier_sense_errors),
  1615. XGBE_STATS2_INFO(tx_bytes),
  1616. XGBE_STATS2_INFO(tx_64byte_frames),
  1617. XGBE_STATS2_INFO(tx_65_to_127byte_frames),
  1618. XGBE_STATS2_INFO(tx_128_to_255byte_frames),
  1619. XGBE_STATS2_INFO(tx_256_to_511byte_frames),
  1620. XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
  1621. XGBE_STATS2_INFO(tx_1024byte_frames),
  1622. XGBE_STATS2_INFO(net_bytes),
  1623. XGBE_STATS2_INFO(rx_sof_overruns),
  1624. XGBE_STATS2_INFO(rx_mof_overruns),
  1625. XGBE_STATS2_INFO(rx_dma_overruns),
  1626. };
  1627. #define for_each_intf(i, priv) \
  1628. list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
  1629. #define for_each_sec_slave(slave, priv) \
  1630. list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
  1631. #define first_sec_slave(priv) \
  1632. list_first_entry(&priv->secondary_slaves, \
  1633. struct gbe_slave, slave_list)
  1634. static void keystone_get_drvinfo(struct net_device *ndev,
  1635. struct ethtool_drvinfo *info)
  1636. {
  1637. strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
  1638. strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
  1639. }
  1640. static u32 keystone_get_msglevel(struct net_device *ndev)
  1641. {
  1642. struct netcp_intf *netcp = netdev_priv(ndev);
  1643. return netcp->msg_enable;
  1644. }
  1645. static void keystone_set_msglevel(struct net_device *ndev, u32 value)
  1646. {
  1647. struct netcp_intf *netcp = netdev_priv(ndev);
  1648. netcp->msg_enable = value;
  1649. }
  1650. static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
  1651. {
  1652. struct gbe_intf *gbe_intf;
  1653. gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
  1654. if (!gbe_intf)
  1655. gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
  1656. return gbe_intf;
  1657. }
  1658. static void keystone_get_stat_strings(struct net_device *ndev,
  1659. uint32_t stringset, uint8_t *data)
  1660. {
  1661. struct netcp_intf *netcp = netdev_priv(ndev);
  1662. struct gbe_intf *gbe_intf;
  1663. struct gbe_priv *gbe_dev;
  1664. int i;
  1665. gbe_intf = keystone_get_intf_data(netcp);
  1666. if (!gbe_intf)
  1667. return;
  1668. gbe_dev = gbe_intf->gbe_dev;
  1669. switch (stringset) {
  1670. case ETH_SS_STATS:
  1671. for (i = 0; i < gbe_dev->num_et_stats; i++) {
  1672. memcpy(data, gbe_dev->et_stats[i].desc,
  1673. ETH_GSTRING_LEN);
  1674. data += ETH_GSTRING_LEN;
  1675. }
  1676. break;
  1677. case ETH_SS_TEST:
  1678. break;
  1679. }
  1680. }
  1681. static int keystone_get_sset_count(struct net_device *ndev, int stringset)
  1682. {
  1683. struct netcp_intf *netcp = netdev_priv(ndev);
  1684. struct gbe_intf *gbe_intf;
  1685. struct gbe_priv *gbe_dev;
  1686. gbe_intf = keystone_get_intf_data(netcp);
  1687. if (!gbe_intf)
  1688. return -EINVAL;
  1689. gbe_dev = gbe_intf->gbe_dev;
  1690. switch (stringset) {
  1691. case ETH_SS_TEST:
  1692. return 0;
  1693. case ETH_SS_STATS:
  1694. return gbe_dev->num_et_stats;
  1695. default:
  1696. return -EINVAL;
  1697. }
  1698. }
  1699. static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
  1700. {
  1701. void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
  1702. u32 __iomem *p_stats_entry;
  1703. int i;
  1704. for (i = 0; i < gbe_dev->num_et_stats; i++) {
  1705. if (gbe_dev->et_stats[i].type == stats_mod) {
  1706. p_stats_entry = base + gbe_dev->et_stats[i].offset;
  1707. gbe_dev->hw_stats[i] = 0;
  1708. gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
  1709. }
  1710. }
  1711. }
  1712. static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
  1713. int et_stats_entry)
  1714. {
  1715. void __iomem *base = NULL;
  1716. u32 __iomem *p_stats_entry;
  1717. u32 curr, delta;
  1718. /* The hw_stats_regs pointers are already
  1719. * properly set to point to the right base:
  1720. */
  1721. base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
  1722. p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
  1723. curr = readl(p_stats_entry);
  1724. delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
  1725. gbe_dev->hw_stats_prev[et_stats_entry] = curr;
  1726. gbe_dev->hw_stats[et_stats_entry] += delta;
  1727. }
  1728. static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
  1729. {
  1730. int i;
  1731. for (i = 0; i < gbe_dev->num_et_stats; i++) {
  1732. gbe_update_hw_stats_entry(gbe_dev, i);
  1733. if (data)
  1734. data[i] = gbe_dev->hw_stats[i];
  1735. }
  1736. }
  1737. static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
  1738. int stats_mod)
  1739. {
  1740. u32 val;
  1741. val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
  1742. switch (stats_mod) {
  1743. case GBE_STATSA_MODULE:
  1744. case GBE_STATSB_MODULE:
  1745. val &= ~GBE_STATS_CD_SEL;
  1746. break;
  1747. case GBE_STATSC_MODULE:
  1748. case GBE_STATSD_MODULE:
  1749. val |= GBE_STATS_CD_SEL;
  1750. break;
  1751. default:
  1752. return;
  1753. }
  1754. /* make the stat module visible */
  1755. writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
  1756. }
  1757. static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
  1758. {
  1759. gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
  1760. gbe_reset_mod_stats(gbe_dev, stats_mod);
  1761. }
  1762. static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
  1763. {
  1764. u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
  1765. int et_entry, j, pair;
  1766. for (pair = 0; pair < 2; pair++) {
  1767. gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
  1768. GBE_STATSC_MODULE :
  1769. GBE_STATSA_MODULE));
  1770. for (j = 0; j < half_num_et_stats; j++) {
  1771. et_entry = pair * half_num_et_stats + j;
  1772. gbe_update_hw_stats_entry(gbe_dev, et_entry);
  1773. if (data)
  1774. data[et_entry] = gbe_dev->hw_stats[et_entry];
  1775. }
  1776. }
  1777. }
  1778. static void keystone_get_ethtool_stats(struct net_device *ndev,
  1779. struct ethtool_stats *stats,
  1780. uint64_t *data)
  1781. {
  1782. struct netcp_intf *netcp = netdev_priv(ndev);
  1783. struct gbe_intf *gbe_intf;
  1784. struct gbe_priv *gbe_dev;
  1785. gbe_intf = keystone_get_intf_data(netcp);
  1786. if (!gbe_intf)
  1787. return;
  1788. gbe_dev = gbe_intf->gbe_dev;
  1789. spin_lock_bh(&gbe_dev->hw_stats_lock);
  1790. if (IS_SS_ID_VER_14(gbe_dev))
  1791. gbe_update_stats_ver14(gbe_dev, data);
  1792. else
  1793. gbe_update_stats(gbe_dev, data);
  1794. spin_unlock_bh(&gbe_dev->hw_stats_lock);
  1795. }
  1796. static int keystone_get_link_ksettings(struct net_device *ndev,
  1797. struct ethtool_link_ksettings *cmd)
  1798. {
  1799. struct netcp_intf *netcp = netdev_priv(ndev);
  1800. struct phy_device *phy = ndev->phydev;
  1801. struct gbe_intf *gbe_intf;
  1802. if (!phy)
  1803. return -EINVAL;
  1804. gbe_intf = keystone_get_intf_data(netcp);
  1805. if (!gbe_intf)
  1806. return -EINVAL;
  1807. if (!gbe_intf->slave)
  1808. return -EINVAL;
  1809. phy_ethtool_ksettings_get(phy, cmd);
  1810. cmd->base.port = gbe_intf->slave->phy_port_t;
  1811. return 0;
  1812. }
  1813. static int keystone_set_link_ksettings(struct net_device *ndev,
  1814. const struct ethtool_link_ksettings *cmd)
  1815. {
  1816. struct netcp_intf *netcp = netdev_priv(ndev);
  1817. struct phy_device *phy = ndev->phydev;
  1818. struct gbe_intf *gbe_intf;
  1819. u8 port = cmd->base.port;
  1820. u32 advertising, supported;
  1821. u32 features;
  1822. ethtool_convert_link_mode_to_legacy_u32(&advertising,
  1823. cmd->link_modes.advertising);
  1824. ethtool_convert_link_mode_to_legacy_u32(&supported,
  1825. cmd->link_modes.supported);
  1826. features = advertising & supported;
  1827. if (!phy)
  1828. return -EINVAL;
  1829. gbe_intf = keystone_get_intf_data(netcp);
  1830. if (!gbe_intf)
  1831. return -EINVAL;
  1832. if (!gbe_intf->slave)
  1833. return -EINVAL;
  1834. if (port != gbe_intf->slave->phy_port_t) {
  1835. if ((port == PORT_TP) && !(features & ADVERTISED_TP))
  1836. return -EINVAL;
  1837. if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
  1838. return -EINVAL;
  1839. if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
  1840. return -EINVAL;
  1841. if ((port == PORT_MII) && !(features & ADVERTISED_MII))
  1842. return -EINVAL;
  1843. if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
  1844. return -EINVAL;
  1845. }
  1846. gbe_intf->slave->phy_port_t = port;
  1847. return phy_ethtool_ksettings_set(phy, cmd);
  1848. }
  1849. #if IS_ENABLED(CONFIG_TI_CPTS)
  1850. static int keystone_get_ts_info(struct net_device *ndev,
  1851. struct ethtool_ts_info *info)
  1852. {
  1853. struct netcp_intf *netcp = netdev_priv(ndev);
  1854. struct gbe_intf *gbe_intf;
  1855. gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
  1856. if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
  1857. return -EINVAL;
  1858. info->so_timestamping =
  1859. SOF_TIMESTAMPING_TX_HARDWARE |
  1860. SOF_TIMESTAMPING_TX_SOFTWARE |
  1861. SOF_TIMESTAMPING_RX_HARDWARE |
  1862. SOF_TIMESTAMPING_RX_SOFTWARE |
  1863. SOF_TIMESTAMPING_SOFTWARE |
  1864. SOF_TIMESTAMPING_RAW_HARDWARE;
  1865. info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
  1866. info->tx_types =
  1867. (1 << HWTSTAMP_TX_OFF) |
  1868. (1 << HWTSTAMP_TX_ON);
  1869. info->rx_filters =
  1870. (1 << HWTSTAMP_FILTER_NONE) |
  1871. (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
  1872. (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
  1873. return 0;
  1874. }
  1875. #else
  1876. static int keystone_get_ts_info(struct net_device *ndev,
  1877. struct ethtool_ts_info *info)
  1878. {
  1879. info->so_timestamping =
  1880. SOF_TIMESTAMPING_TX_SOFTWARE |
  1881. SOF_TIMESTAMPING_RX_SOFTWARE |
  1882. SOF_TIMESTAMPING_SOFTWARE;
  1883. info->phc_index = -1;
  1884. info->tx_types = 0;
  1885. info->rx_filters = 0;
  1886. return 0;
  1887. }
  1888. #endif /* CONFIG_TI_CPTS */
  1889. static const struct ethtool_ops keystone_ethtool_ops = {
  1890. .get_drvinfo = keystone_get_drvinfo,
  1891. .get_link = ethtool_op_get_link,
  1892. .get_msglevel = keystone_get_msglevel,
  1893. .set_msglevel = keystone_set_msglevel,
  1894. .get_strings = keystone_get_stat_strings,
  1895. .get_sset_count = keystone_get_sset_count,
  1896. .get_ethtool_stats = keystone_get_ethtool_stats,
  1897. .get_link_ksettings = keystone_get_link_ksettings,
  1898. .set_link_ksettings = keystone_set_link_ksettings,
  1899. .get_ts_info = keystone_get_ts_info,
  1900. };
  1901. static void gbe_set_slave_mac(struct gbe_slave *slave,
  1902. struct gbe_intf *gbe_intf)
  1903. {
  1904. struct net_device *ndev = gbe_intf->ndev;
  1905. writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
  1906. writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
  1907. }
  1908. static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
  1909. {
  1910. if (priv->host_port == 0)
  1911. return slave_num + 1;
  1912. return slave_num;
  1913. }
  1914. static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
  1915. struct net_device *ndev,
  1916. struct gbe_slave *slave,
  1917. int up)
  1918. {
  1919. struct phy_device *phy = slave->phy;
  1920. u32 mac_control = 0;
  1921. if (up) {
  1922. mac_control = slave->mac_control;
  1923. if (phy && (phy->speed == SPEED_1000)) {
  1924. mac_control |= MACSL_GIG_MODE;
  1925. mac_control &= ~MACSL_XGIG_MODE;
  1926. } else if (phy && (phy->speed == SPEED_10000)) {
  1927. mac_control |= MACSL_XGIG_MODE;
  1928. mac_control &= ~MACSL_GIG_MODE;
  1929. }
  1930. writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
  1931. mac_control));
  1932. cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
  1933. ALE_PORT_STATE,
  1934. ALE_PORT_STATE_FORWARD);
  1935. if (ndev && slave->open &&
  1936. ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
  1937. (slave->link_interface != RGMII_LINK_MAC_PHY) &&
  1938. (slave->link_interface != XGMII_LINK_MAC_PHY)))
  1939. netif_carrier_on(ndev);
  1940. } else {
  1941. writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
  1942. mac_control));
  1943. cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
  1944. ALE_PORT_STATE,
  1945. ALE_PORT_STATE_DISABLE);
  1946. if (ndev &&
  1947. ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
  1948. (slave->link_interface != RGMII_LINK_MAC_PHY) &&
  1949. (slave->link_interface != XGMII_LINK_MAC_PHY)))
  1950. netif_carrier_off(ndev);
  1951. }
  1952. if (phy)
  1953. phy_print_status(phy);
  1954. }
  1955. static bool gbe_phy_link_status(struct gbe_slave *slave)
  1956. {
  1957. return !slave->phy || slave->phy->link;
  1958. }
  1959. #define RGMII_REG_STATUS_LINK BIT(0)
  1960. static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
  1961. {
  1962. u32 val = 0;
  1963. val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
  1964. *status = !!(val & RGMII_REG_STATUS_LINK);
  1965. }
  1966. static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
  1967. struct gbe_slave *slave,
  1968. struct net_device *ndev)
  1969. {
  1970. bool sw_link_state = true, phy_link_state;
  1971. int sp = slave->slave_num, link_state;
  1972. if (!slave->open)
  1973. return;
  1974. if (SLAVE_LINK_IS_RGMII(slave))
  1975. netcp_2u_rgmii_get_port_link(gbe_dev,
  1976. &sw_link_state);
  1977. if (SLAVE_LINK_IS_SGMII(slave))
  1978. sw_link_state =
  1979. netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
  1980. phy_link_state = gbe_phy_link_status(slave);
  1981. link_state = phy_link_state & sw_link_state;
  1982. if (atomic_xchg(&slave->link_state, link_state) != link_state)
  1983. netcp_ethss_link_state_action(gbe_dev, ndev, slave,
  1984. link_state);
  1985. }
  1986. static void xgbe_adjust_link(struct net_device *ndev)
  1987. {
  1988. struct netcp_intf *netcp = netdev_priv(ndev);
  1989. struct gbe_intf *gbe_intf;
  1990. gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
  1991. if (!gbe_intf)
  1992. return;
  1993. netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
  1994. ndev);
  1995. }
  1996. static void gbe_adjust_link(struct net_device *ndev)
  1997. {
  1998. struct netcp_intf *netcp = netdev_priv(ndev);
  1999. struct gbe_intf *gbe_intf;
  2000. gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
  2001. if (!gbe_intf)
  2002. return;
  2003. netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
  2004. ndev);
  2005. }
  2006. static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
  2007. {
  2008. struct gbe_priv *gbe_dev = netdev_priv(ndev);
  2009. struct gbe_slave *slave;
  2010. for_each_sec_slave(slave, gbe_dev)
  2011. netcp_ethss_update_link_state(gbe_dev, slave, NULL);
  2012. }
  2013. /* Reset EMAC
  2014. * Soft reset is set and polled until clear, or until a timeout occurs
  2015. */
  2016. static int gbe_port_reset(struct gbe_slave *slave)
  2017. {
  2018. u32 i, v;
  2019. /* Set the soft reset bit */
  2020. writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
  2021. /* Wait for the bit to clear */
  2022. for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
  2023. v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
  2024. if ((v & SOFT_RESET_MASK) != SOFT_RESET)
  2025. return 0;
  2026. }
  2027. /* Timeout on the reset */
  2028. return GMACSL_RET_WARN_RESET_INCOMPLETE;
  2029. }
  2030. /* Configure EMAC */
  2031. static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
  2032. int max_rx_len)
  2033. {
  2034. void __iomem *rx_maxlen_reg;
  2035. u32 xgmii_mode;
  2036. if (max_rx_len > NETCP_MAX_FRAME_SIZE)
  2037. max_rx_len = NETCP_MAX_FRAME_SIZE;
  2038. /* Enable correct MII mode at SS level */
  2039. if (IS_SS_ID_XGBE(gbe_dev) &&
  2040. (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
  2041. xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
  2042. xgmii_mode |= (1 << slave->slave_num);
  2043. writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
  2044. }
  2045. if (IS_SS_ID_MU(gbe_dev))
  2046. rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
  2047. else
  2048. rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
  2049. writel(max_rx_len, rx_maxlen_reg);
  2050. writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
  2051. }
  2052. static void gbe_sgmii_rtreset(struct gbe_priv *priv,
  2053. struct gbe_slave *slave, bool set)
  2054. {
  2055. if (SLAVE_LINK_IS_XGMII(slave))
  2056. return;
  2057. netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
  2058. slave->slave_num, set);
  2059. }
  2060. static void gbe_slave_stop(struct gbe_intf *intf)
  2061. {
  2062. struct gbe_priv *gbe_dev = intf->gbe_dev;
  2063. struct gbe_slave *slave = intf->slave;
  2064. if (!IS_SS_ID_2U(gbe_dev))
  2065. gbe_sgmii_rtreset(gbe_dev, slave, true);
  2066. gbe_port_reset(slave);
  2067. /* Disable forwarding */
  2068. cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
  2069. ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
  2070. cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
  2071. 1 << slave->port_num, 0, 0);
  2072. if (!slave->phy)
  2073. return;
  2074. phy_stop(slave->phy);
  2075. phy_disconnect(slave->phy);
  2076. slave->phy = NULL;
  2077. }
  2078. static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
  2079. {
  2080. if (SLAVE_LINK_IS_XGMII(slave))
  2081. return;
  2082. netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
  2083. netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
  2084. slave->link_interface);
  2085. }
  2086. static int gbe_slave_open(struct gbe_intf *gbe_intf)
  2087. {
  2088. struct gbe_priv *priv = gbe_intf->gbe_dev;
  2089. struct gbe_slave *slave = gbe_intf->slave;
  2090. phy_interface_t phy_mode;
  2091. bool has_phy = false;
  2092. int err;
  2093. void (*hndlr)(struct net_device *) = gbe_adjust_link;
  2094. if (!IS_SS_ID_2U(priv))
  2095. gbe_sgmii_config(priv, slave);
  2096. gbe_port_reset(slave);
  2097. if (!IS_SS_ID_2U(priv))
  2098. gbe_sgmii_rtreset(priv, slave, false);
  2099. gbe_port_config(priv, slave, priv->rx_packet_max);
  2100. gbe_set_slave_mac(slave, gbe_intf);
  2101. /* For NU & 2U switch, map the vlan priorities to zero
  2102. * as we only configure to use priority 0
  2103. */
  2104. if (IS_SS_ID_MU(priv))
  2105. writel(HOST_TX_PRI_MAP_DEFAULT,
  2106. GBE_REG_ADDR(slave, port_regs, rx_pri_map));
  2107. /* enable forwarding */
  2108. cpsw_ale_control_set(priv->ale, slave->port_num,
  2109. ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
  2110. cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
  2111. 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
  2112. if (slave->link_interface == SGMII_LINK_MAC_PHY) {
  2113. has_phy = true;
  2114. phy_mode = PHY_INTERFACE_MODE_SGMII;
  2115. slave->phy_port_t = PORT_MII;
  2116. } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
  2117. has_phy = true;
  2118. err = of_get_phy_mode(slave->node, &phy_mode);
  2119. /* if phy-mode is not present, default to
  2120. * PHY_INTERFACE_MODE_RGMII
  2121. */
  2122. if (err)
  2123. phy_mode = PHY_INTERFACE_MODE_RGMII;
  2124. if (!phy_interface_mode_is_rgmii(phy_mode)) {
  2125. dev_err(priv->dev,
  2126. "Unsupported phy mode %d\n", phy_mode);
  2127. return -EINVAL;
  2128. }
  2129. slave->phy_port_t = PORT_MII;
  2130. } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
  2131. has_phy = true;
  2132. phy_mode = PHY_INTERFACE_MODE_NA;
  2133. slave->phy_port_t = PORT_FIBRE;
  2134. }
  2135. if (has_phy) {
  2136. if (IS_SS_ID_XGBE(priv))
  2137. hndlr = xgbe_adjust_link;
  2138. slave->phy = of_phy_connect(gbe_intf->ndev,
  2139. slave->phy_node,
  2140. hndlr, 0,
  2141. phy_mode);
  2142. if (!slave->phy) {
  2143. dev_err(priv->dev, "phy not found on slave %d\n",
  2144. slave->slave_num);
  2145. return -ENODEV;
  2146. }
  2147. dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
  2148. phydev_name(slave->phy));
  2149. phy_start(slave->phy);
  2150. }
  2151. return 0;
  2152. }
  2153. static void gbe_init_host_port(struct gbe_priv *priv)
  2154. {
  2155. int bypass_en = 1;
  2156. /* Host Tx Pri */
  2157. if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
  2158. writel(HOST_TX_PRI_MAP_DEFAULT,
  2159. GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
  2160. /* Max length register */
  2161. writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
  2162. rx_maxlen));
  2163. cpsw_ale_start(priv->ale);
  2164. if (priv->enable_ale)
  2165. bypass_en = 0;
  2166. cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
  2167. cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
  2168. cpsw_ale_control_set(priv->ale, priv->host_port,
  2169. ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
  2170. cpsw_ale_control_set(priv->ale, 0,
  2171. ALE_PORT_UNKNOWN_VLAN_MEMBER,
  2172. GBE_PORT_MASK(priv->ale_ports));
  2173. cpsw_ale_control_set(priv->ale, 0,
  2174. ALE_PORT_UNKNOWN_MCAST_FLOOD,
  2175. GBE_PORT_MASK(priv->ale_ports - 1));
  2176. cpsw_ale_control_set(priv->ale, 0,
  2177. ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
  2178. GBE_PORT_MASK(priv->ale_ports));
  2179. cpsw_ale_control_set(priv->ale, 0,
  2180. ALE_PORT_UNTAGGED_EGRESS,
  2181. GBE_PORT_MASK(priv->ale_ports));
  2182. }
  2183. static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2184. {
  2185. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2186. u16 vlan_id;
  2187. cpsw_ale_add_mcast(gbe_dev->ale, addr,
  2188. GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
  2189. ALE_MCAST_FWD_2);
  2190. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
  2191. cpsw_ale_add_mcast(gbe_dev->ale, addr,
  2192. GBE_PORT_MASK(gbe_dev->ale_ports),
  2193. ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
  2194. }
  2195. }
  2196. static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2197. {
  2198. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2199. u16 vlan_id;
  2200. cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
  2201. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
  2202. cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
  2203. ALE_VLAN, vlan_id);
  2204. }
  2205. static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2206. {
  2207. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2208. u16 vlan_id;
  2209. cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
  2210. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
  2211. cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
  2212. }
  2213. }
  2214. static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
  2215. {
  2216. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2217. u16 vlan_id;
  2218. cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
  2219. for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
  2220. cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
  2221. ALE_VLAN, vlan_id);
  2222. }
  2223. }
  2224. static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
  2225. {
  2226. struct gbe_intf *gbe_intf = intf_priv;
  2227. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2228. dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
  2229. naddr->addr, naddr->type);
  2230. switch (naddr->type) {
  2231. case ADDR_MCAST:
  2232. case ADDR_BCAST:
  2233. gbe_add_mcast_addr(gbe_intf, naddr->addr);
  2234. break;
  2235. case ADDR_UCAST:
  2236. case ADDR_DEV:
  2237. gbe_add_ucast_addr(gbe_intf, naddr->addr);
  2238. break;
  2239. case ADDR_ANY:
  2240. /* nothing to do for promiscuous */
  2241. default:
  2242. break;
  2243. }
  2244. return 0;
  2245. }
  2246. static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
  2247. {
  2248. struct gbe_intf *gbe_intf = intf_priv;
  2249. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2250. dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
  2251. naddr->addr, naddr->type);
  2252. switch (naddr->type) {
  2253. case ADDR_MCAST:
  2254. case ADDR_BCAST:
  2255. gbe_del_mcast_addr(gbe_intf, naddr->addr);
  2256. break;
  2257. case ADDR_UCAST:
  2258. case ADDR_DEV:
  2259. gbe_del_ucast_addr(gbe_intf, naddr->addr);
  2260. break;
  2261. case ADDR_ANY:
  2262. /* nothing to do for promiscuous */
  2263. default:
  2264. break;
  2265. }
  2266. return 0;
  2267. }
  2268. static int gbe_add_vid(void *intf_priv, int vid)
  2269. {
  2270. struct gbe_intf *gbe_intf = intf_priv;
  2271. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2272. set_bit(vid, gbe_intf->active_vlans);
  2273. cpsw_ale_add_vlan(gbe_dev->ale, vid,
  2274. GBE_PORT_MASK(gbe_dev->ale_ports),
  2275. GBE_MASK_NO_PORTS,
  2276. GBE_PORT_MASK(gbe_dev->ale_ports),
  2277. GBE_PORT_MASK(gbe_dev->ale_ports - 1));
  2278. return 0;
  2279. }
  2280. static int gbe_del_vid(void *intf_priv, int vid)
  2281. {
  2282. struct gbe_intf *gbe_intf = intf_priv;
  2283. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2284. cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
  2285. clear_bit(vid, gbe_intf->active_vlans);
  2286. return 0;
  2287. }
  2288. #if IS_ENABLED(CONFIG_TI_CPTS)
  2289. static void gbe_txtstamp(void *context, struct sk_buff *skb)
  2290. {
  2291. struct gbe_intf *gbe_intf = context;
  2292. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2293. cpts_tx_timestamp(gbe_dev->cpts, skb);
  2294. }
  2295. static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
  2296. const struct netcp_packet *p_info)
  2297. {
  2298. struct sk_buff *skb = p_info->skb;
  2299. return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
  2300. }
  2301. static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
  2302. struct netcp_packet *p_info)
  2303. {
  2304. struct phy_device *phydev = p_info->skb->dev->phydev;
  2305. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2306. if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
  2307. !gbe_dev->tx_ts_enabled)
  2308. return 0;
  2309. /* If phy has the txtstamp api, assume it will do it.
  2310. * We mark it here because skb_tx_timestamp() is called
  2311. * after all the txhooks are called.
  2312. */
  2313. if (phy_has_txtstamp(phydev)) {
  2314. skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
  2315. return 0;
  2316. }
  2317. if (gbe_need_txtstamp(gbe_intf, p_info)) {
  2318. p_info->txtstamp = gbe_txtstamp;
  2319. p_info->ts_context = (void *)gbe_intf;
  2320. skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
  2321. }
  2322. return 0;
  2323. }
  2324. static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
  2325. {
  2326. struct phy_device *phydev = p_info->skb->dev->phydev;
  2327. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2328. if (p_info->rxtstamp_complete)
  2329. return 0;
  2330. if (phy_has_rxtstamp(phydev)) {
  2331. p_info->rxtstamp_complete = true;
  2332. return 0;
  2333. }
  2334. if (gbe_dev->rx_ts_enabled)
  2335. cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
  2336. p_info->rxtstamp_complete = true;
  2337. return 0;
  2338. }
  2339. static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
  2340. {
  2341. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2342. struct cpts *cpts = gbe_dev->cpts;
  2343. struct hwtstamp_config cfg;
  2344. if (!cpts)
  2345. return -EOPNOTSUPP;
  2346. cfg.flags = 0;
  2347. cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
  2348. cfg.rx_filter = gbe_dev->rx_ts_enabled;
  2349. return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
  2350. }
  2351. static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
  2352. {
  2353. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2354. struct gbe_slave *slave = gbe_intf->slave;
  2355. u32 ts_en, seq_id, ctl;
  2356. if (!gbe_dev->rx_ts_enabled &&
  2357. !gbe_dev->tx_ts_enabled) {
  2358. writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
  2359. return;
  2360. }
  2361. seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
  2362. ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
  2363. ctl = ETH_P_1588 | TS_TTL_NONZERO |
  2364. (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
  2365. (slave->ts_ctl.uni ? TS_UNI_EN :
  2366. slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
  2367. if (gbe_dev->tx_ts_enabled)
  2368. ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
  2369. if (gbe_dev->rx_ts_enabled)
  2370. ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
  2371. writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
  2372. writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
  2373. writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
  2374. }
  2375. static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
  2376. {
  2377. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2378. struct cpts *cpts = gbe_dev->cpts;
  2379. struct hwtstamp_config cfg;
  2380. if (!cpts)
  2381. return -EOPNOTSUPP;
  2382. if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
  2383. return -EFAULT;
  2384. switch (cfg.tx_type) {
  2385. case HWTSTAMP_TX_OFF:
  2386. gbe_dev->tx_ts_enabled = 0;
  2387. break;
  2388. case HWTSTAMP_TX_ON:
  2389. gbe_dev->tx_ts_enabled = 1;
  2390. break;
  2391. default:
  2392. return -ERANGE;
  2393. }
  2394. switch (cfg.rx_filter) {
  2395. case HWTSTAMP_FILTER_NONE:
  2396. gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
  2397. break;
  2398. case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
  2399. case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
  2400. case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
  2401. gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
  2402. cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
  2403. break;
  2404. case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
  2405. case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
  2406. case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
  2407. case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
  2408. case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
  2409. case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
  2410. case HWTSTAMP_FILTER_PTP_V2_EVENT:
  2411. case HWTSTAMP_FILTER_PTP_V2_SYNC:
  2412. case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
  2413. gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
  2414. cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
  2415. break;
  2416. default:
  2417. return -ERANGE;
  2418. }
  2419. gbe_hwtstamp(gbe_intf);
  2420. return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
  2421. }
  2422. static void gbe_register_cpts(struct gbe_priv *gbe_dev)
  2423. {
  2424. if (!gbe_dev->cpts)
  2425. return;
  2426. if (gbe_dev->cpts_registered > 0)
  2427. goto done;
  2428. if (cpts_register(gbe_dev->cpts)) {
  2429. dev_err(gbe_dev->dev, "error registering cpts device\n");
  2430. return;
  2431. }
  2432. done:
  2433. ++gbe_dev->cpts_registered;
  2434. }
  2435. static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
  2436. {
  2437. if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
  2438. return;
  2439. if (--gbe_dev->cpts_registered)
  2440. return;
  2441. cpts_unregister(gbe_dev->cpts);
  2442. }
  2443. #else
  2444. static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
  2445. struct netcp_packet *p_info)
  2446. {
  2447. return 0;
  2448. }
  2449. static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
  2450. struct netcp_packet *p_info)
  2451. {
  2452. return 0;
  2453. }
  2454. static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
  2455. struct ifreq *ifr, int cmd)
  2456. {
  2457. return -EOPNOTSUPP;
  2458. }
  2459. static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
  2460. {
  2461. }
  2462. static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
  2463. {
  2464. }
  2465. static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
  2466. {
  2467. return -EOPNOTSUPP;
  2468. }
  2469. static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
  2470. {
  2471. return -EOPNOTSUPP;
  2472. }
  2473. #endif /* CONFIG_TI_CPTS */
  2474. static int gbe_set_rx_mode(void *intf_priv, bool promisc)
  2475. {
  2476. struct gbe_intf *gbe_intf = intf_priv;
  2477. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2478. struct cpsw_ale *ale = gbe_dev->ale;
  2479. unsigned long timeout;
  2480. int i, ret = -ETIMEDOUT;
  2481. /* Disable(1)/Enable(0) Learn for all ports (host is port 0 and
  2482. * slaves are port 1 and up
  2483. */
  2484. for (i = 0; i <= gbe_dev->num_slaves; i++) {
  2485. cpsw_ale_control_set(ale, i,
  2486. ALE_PORT_NOLEARN, !!promisc);
  2487. cpsw_ale_control_set(ale, i,
  2488. ALE_PORT_NO_SA_UPDATE, !!promisc);
  2489. }
  2490. if (!promisc) {
  2491. /* Don't Flood All Unicast Packets to Host port */
  2492. cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
  2493. dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
  2494. return 0;
  2495. }
  2496. timeout = jiffies + HZ;
  2497. /* Clear All Untouched entries */
  2498. cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
  2499. do {
  2500. cpu_relax();
  2501. if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
  2502. ret = 0;
  2503. break;
  2504. }
  2505. } while (time_after(timeout, jiffies));
  2506. /* Make sure it is not a false timeout */
  2507. if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
  2508. return ret;
  2509. cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
  2510. /* Clear all mcast from ALE */
  2511. cpsw_ale_flush_multicast(ale,
  2512. GBE_PORT_MASK(gbe_dev->ale_ports),
  2513. -1);
  2514. /* Flood All Unicast Packets to Host port */
  2515. cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
  2516. dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
  2517. return ret;
  2518. }
  2519. static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
  2520. {
  2521. struct gbe_intf *gbe_intf = intf_priv;
  2522. struct phy_device *phy = gbe_intf->slave->phy;
  2523. if (!phy_has_hwtstamp(phy)) {
  2524. switch (cmd) {
  2525. case SIOCGHWTSTAMP:
  2526. return gbe_hwtstamp_get(gbe_intf, req);
  2527. case SIOCSHWTSTAMP:
  2528. return gbe_hwtstamp_set(gbe_intf, req);
  2529. }
  2530. }
  2531. if (phy)
  2532. return phy_mii_ioctl(phy, req, cmd);
  2533. return -EOPNOTSUPP;
  2534. }
  2535. static void netcp_ethss_timer(struct timer_list *t)
  2536. {
  2537. struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
  2538. struct gbe_intf *gbe_intf;
  2539. struct gbe_slave *slave;
  2540. /* Check & update SGMII link state of interfaces */
  2541. for_each_intf(gbe_intf, gbe_dev) {
  2542. if (!gbe_intf->slave->open)
  2543. continue;
  2544. netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
  2545. gbe_intf->ndev);
  2546. }
  2547. /* Check & update SGMII link state of secondary ports */
  2548. for_each_sec_slave(slave, gbe_dev) {
  2549. netcp_ethss_update_link_state(gbe_dev, slave, NULL);
  2550. }
  2551. /* A timer runs as a BH, no need to block them */
  2552. spin_lock(&gbe_dev->hw_stats_lock);
  2553. if (IS_SS_ID_VER_14(gbe_dev))
  2554. gbe_update_stats_ver14(gbe_dev, NULL);
  2555. else
  2556. gbe_update_stats(gbe_dev, NULL);
  2557. spin_unlock(&gbe_dev->hw_stats_lock);
  2558. gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
  2559. add_timer(&gbe_dev->timer);
  2560. }
  2561. static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
  2562. {
  2563. struct gbe_intf *gbe_intf = data;
  2564. p_info->tx_pipe = &gbe_intf->tx_pipe;
  2565. return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
  2566. }
  2567. static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
  2568. {
  2569. struct gbe_intf *gbe_intf = data;
  2570. return gbe_rxtstamp(gbe_intf, p_info);
  2571. }
  2572. static int gbe_open(void *intf_priv, struct net_device *ndev)
  2573. {
  2574. struct gbe_intf *gbe_intf = intf_priv;
  2575. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2576. struct netcp_intf *netcp = netdev_priv(ndev);
  2577. struct gbe_slave *slave = gbe_intf->slave;
  2578. int port_num = slave->port_num;
  2579. u32 reg, val;
  2580. int ret;
  2581. reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
  2582. dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
  2583. GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
  2584. GBE_RTL_VERSION(reg), GBE_IDENT(reg));
  2585. /* For 10G and on NetCP 1.5, use directed to port */
  2586. if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
  2587. gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
  2588. if (gbe_dev->enable_ale)
  2589. gbe_intf->tx_pipe.switch_to_port = 0;
  2590. else
  2591. gbe_intf->tx_pipe.switch_to_port = port_num;
  2592. dev_dbg(gbe_dev->dev,
  2593. "opened TX channel %s: %p with to port %d, flags %d\n",
  2594. gbe_intf->tx_pipe.dma_chan_name,
  2595. gbe_intf->tx_pipe.dma_channel,
  2596. gbe_intf->tx_pipe.switch_to_port,
  2597. gbe_intf->tx_pipe.flags);
  2598. gbe_slave_stop(gbe_intf);
  2599. /* disable priority elevation and enable statistics on all ports */
  2600. writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
  2601. /* Control register */
  2602. val = GBE_CTL_P0_ENABLE;
  2603. if (IS_SS_ID_MU(gbe_dev)) {
  2604. val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
  2605. netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
  2606. }
  2607. writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
  2608. /* All statistics enabled and STAT AB visible by default */
  2609. writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
  2610. stat_port_en));
  2611. ret = gbe_slave_open(gbe_intf);
  2612. if (ret)
  2613. goto fail;
  2614. netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
  2615. netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
  2616. slave->open = true;
  2617. netcp_ethss_update_link_state(gbe_dev, slave, ndev);
  2618. gbe_register_cpts(gbe_dev);
  2619. return 0;
  2620. fail:
  2621. gbe_slave_stop(gbe_intf);
  2622. return ret;
  2623. }
  2624. static int gbe_close(void *intf_priv, struct net_device *ndev)
  2625. {
  2626. struct gbe_intf *gbe_intf = intf_priv;
  2627. struct netcp_intf *netcp = netdev_priv(ndev);
  2628. struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
  2629. gbe_unregister_cpts(gbe_dev);
  2630. gbe_slave_stop(gbe_intf);
  2631. netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
  2632. netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
  2633. gbe_intf->slave->open = false;
  2634. atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
  2635. return 0;
  2636. }
  2637. #if IS_ENABLED(CONFIG_TI_CPTS)
  2638. static void init_slave_ts_ctl(struct gbe_slave *slave)
  2639. {
  2640. slave->ts_ctl.uni = 1;
  2641. slave->ts_ctl.dst_port_map =
  2642. (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
  2643. slave->ts_ctl.maddr_map =
  2644. (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
  2645. }
  2646. #else
  2647. static void init_slave_ts_ctl(struct gbe_slave *slave)
  2648. {
  2649. }
  2650. #endif /* CONFIG_TI_CPTS */
static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
		      struct device_node *node)
{
	int port_reg_num;
	u32 port_reg_ofs, emac_reg_ofs;
	u32 port_reg_blk_sz, emac_reg_blk_sz;

	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
		return -EINVAL;
	}

	if (of_property_read_u32(node, "link-interface",
				 &slave->link_interface)) {
		dev_warn(gbe_dev->dev,
			 "missing link-interface value defaulting to 1G mac-phy link\n");
		slave->link_interface = SGMII_LINK_MAC_PHY;
	}

	slave->node = node;
	slave->open = false;
	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
	    (slave->link_interface == RGMII_LINK_MAC_PHY) ||
	    (slave->link_interface == XGMII_LINK_MAC_PHY))
		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);

	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);

	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
	else
		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;

	/* Emac regs memmap are contiguous but port regs are not */
	port_reg_num = slave->slave_num;
	if (IS_SS_ID_VER_14(gbe_dev)) {
		if (slave->slave_num > 1) {
			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
			port_reg_num -= 2;
		} else {
			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
		}
		emac_reg_ofs = GBE13_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else if (IS_SS_ID_MU(gbe_dev)) {
		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
		emac_reg_ofs = GBENU_EMAC_OFFSET;
		port_reg_blk_sz = 0x1000;
		emac_reg_blk_sz = 0x1000;
	} else if (IS_SS_ID_XGBE(gbe_dev)) {
		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
		emac_reg_ofs = XGBE10_EMAC_OFFSET;
		port_reg_blk_sz = 0x30;
		emac_reg_blk_sz = 0x40;
	} else {
		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
			gbe_dev->ss_version);
		return -EINVAL;
	}

	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
				(port_reg_blk_sz * port_reg_num);
	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
				(emac_reg_blk_sz * slave->slave_num);

	if (IS_SS_ID_VER_14(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);

	} else if (IS_SS_ID_MU(gbe_dev)) {
		/* Initialize slave port register offsets */
		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);

		/* Initialize EMAC register offsets */
		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);

	} else if (IS_SS_ID_XGBE(gbe_dev)) {
		/* Initialize slave port register offsets */
		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);

		/* Initialize EMAC register offsets */
		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
	}

	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);

	init_slave_ts_ctl(slave);
	return 0;
}

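/* Secondary slave ports are not exposed as network interfaces; they are
 * configured once here and, for MAC-PHY links, attached to their PHYs
 * through a dummy netdev so that link state can still be tracked.
 */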
static void init_secondary_ports(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct device *dev = gbe_dev->dev;
	phy_interface_t phy_mode;
	struct gbe_priv **priv;
	struct device_node *port;
	struct gbe_slave *slave;
	bool mac_phy_link = false;

	for_each_child_of_node(node, port) {
		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
				port);
			continue;
		}

		if (init_slave(gbe_dev, slave, port)) {
			dev_err(dev,
				"Failed to initialize secondary port(%pOFn), skipping...\n",
				port);
			devm_kfree(dev, slave);
			continue;
		}

		if (!IS_SS_ID_2U(gbe_dev))
			gbe_sgmii_config(gbe_dev, slave);
		gbe_port_reset(slave);
		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
		gbe_dev->num_slaves++;
		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
		    (slave->link_interface == XGMII_LINK_MAC_PHY))
			mac_phy_link = true;

		slave->open = true;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(port);
			break;
		}
	}

	/* of_phy_connect() is needed only for MAC-PHY interface */
	if (!mac_phy_link)
		return;

	/* Allocate dummy netdev device for attaching to phy device */
	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
					   NET_NAME_UNKNOWN, ether_setup);
	if (!gbe_dev->dummy_ndev) {
		dev_err(dev,
			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
		return;
	}
	priv = netdev_priv(gbe_dev->dummy_ndev);
	*priv = gbe_dev;

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
		phy_mode = PHY_INTERFACE_MODE_RGMII;
		slave->phy_port_t = PORT_MII;
	} else {
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	for_each_sec_slave(slave, gbe_dev) {
		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
		    (slave->link_interface != XGMII_LINK_MAC_PHY))
			continue;
		slave->phy =
			of_phy_connect(gbe_dev->dummy_ndev,
				       slave->phy_node,
				       gbe_adjust_link_sec_slaves,
				       0, phy_mode);
		if (!slave->phy) {
			dev_err(dev, "phy not found for slave %d\n",
				slave->slave_num);
		} else {
			dev_dbg(dev, "phy found: id is: 0x%s\n",
				phydev_name(slave->phy));
			phy_start(slave->phy);
		}
	}
}

static void free_secondary_ports(struct gbe_priv *gbe_dev)
{
	struct gbe_slave *slave;

	while (!list_empty(&gbe_dev->secondary_slaves)) {
		slave = first_sec_slave(gbe_dev);

		if (slave->phy)
			phy_disconnect(slave->phy);
		list_del(&slave->slave_list);
	}
	if (gbe_dev->dummy_ndev)
		free_netdev(gbe_dev->dummy_ndev);
}

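/* Map the XGBE 10G subsystem, switch module and serdes register regions
 * and fill in the per-subsystem register offsets, statistics tables and
 * host port parameters.
 */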
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%pOFn) ss address at %d\n",
			node, XGBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe of node(%pOFn) sm address at %d\n",
			node, XGBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
			node, XGBE_SERDES_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
				   XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < gbe_dev->max_num_ports; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of node(%pOFn) of gbe ss address at %d\n",
			node, GBE_SS_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}

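/* Resource setup for the 1.4 (K2HK) gigabit switch subsystem: map the
 * SGMII port 3/4 and switch module regions and record the register
 * offsets, statistics bases and host port parameters.
 */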
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%pOFn) address at index %d\n",
			node, GBE_SGMII34_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe sgmii port34 register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->sgmii_port34_regs = regs;

	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbe node(%pOFn) address at index %d\n",
			node, GBE_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbe switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;

	/* K2HK has only 2 hw stats modules visible at a time, so
	 * module 0 & 2 points to one base and
	 * module 1 & 3 points to the other base
	 */
	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
		gbe_dev->hw_stats_regs[i] =
			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
	}

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}

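/* Resource setup for the newer GBENU (NU/2U) switch subsystems: size the
 * ethtool statistics tables, map the switch module region and record the
 * register offsets used by the rest of the driver.
 */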
static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int i, ret;

	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
	gbe_dev->et_stats = gbenu_et_stats;

	if (IS_SS_ID_MU(gbe_dev))
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
	else
		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
					GBENU_ET_STATS_PORT_SIZE;

	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
					 gbe_dev->num_et_stats, sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	gbe_dev->hw_stats_prev =
		devm_kcalloc(gbe_dev->dev,
			     gbe_dev->num_et_stats, sizeof(u32),
			     GFP_KERNEL);
	if (!gbe_dev->hw_stats_prev) {
		dev_err(gbe_dev->dev,
			"hw_stats_prev memory allocation failed\n");
		return -ENOMEM;
	}

	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev,
			"Can't translate of gbenu node(%pOFn) addr at index %d\n",
			node, GBENU_SM_REG_INDEX);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev,
			"Failed to map gbenu switch module register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->switch_regs = regs;

	if (!IS_SS_ID_2U(gbe_dev))
		gbe_dev->sgmii_port_regs =
			gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;

	/* Although sgmii modules are mem mapped to one contiguous
	 * region on GBENU devices, setting sgmii_port34_regs allows
	 * consistent code when accessing sgmii api
	 */
	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
				     (2 * GBENU_SGMII_MODULE_SIZE);

	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;

	for (i = 0; i < (gbe_dev->max_num_ports); i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
	gbe_dev->ale_ports = gbe_dev->max_num_ports;
	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;

	/* Subsystem registers */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	/* ok to set for MU, but used by 2U only */
	GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);

	/* Switch module registers */
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);

	/* Host port registers */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);

	/* For NU only. 2U does not need tx_pri_map.
	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
	 * while 2U has only 1 such thread
	 */
	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	return 0;
}

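/* Instance probe: identify the subsystem from the compatible string and
 * version register, map its resources, open the TX pipe, create the ALE
 * and CPTS blocks, initialize the host and secondary ports and start the
 * statistics/link timer.
 */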
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces, *interface, *cpts_node;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int i, ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
		gbe_module.set_rx_mode = gbe_set_rx_mode;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled*\n");
	}

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx_queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		return -EINVAL;
	}

	if (of_node_name_eq(node, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			return ret;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (IS_SS_ID_VER_14(gbe_dev))
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;
	} else if (of_node_name_eq(node, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			return ret;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
	} else {
		dev_err(dev, "unknown GBE node(%pOFn)\n", node);
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
				interface);
			continue;
		}
		gbe_dev->num_slaves++;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(interface);
			break;
		}
	}
	of_node_put(interfaces);

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev,
			"No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto free_sec_ports;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_ports = gbe_dev->ale_ports;
	ale_params.dev_id = "cpsw";
	if (IS_SS_ID_NU(gbe_dev))
		ale_params.dev_id = "66ak2el";
	else if (IS_SS_ID_2U(gbe_dev))
		ale_params.dev_id = "66ak2g";
	else if (IS_SS_ID_XGBE(gbe_dev))
		ale_params.dev_id = "66ak2h-xgbe";

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(gbe_dev->ale)) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = PTR_ERR(gbe_dev->ale);
		goto free_sec_ports;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	cpts_node = of_get_child_by_name(node, "cpts");
	if (!cpts_node)
		cpts_node = of_node_get(node);

	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
				    cpts_node, 0);
	of_node_put(cpts_node);
	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
		ret = PTR_ERR(gbe_dev->cpts);
		goto free_sec_ports;
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	spin_lock_bh(&gbe_dev->hw_stats_lock);
	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
		if (IS_SS_ID_VER_14(gbe_dev))
			gbe_reset_mod_stats_ver14(gbe_dev, i);
		else
			gbe_reset_mod_stats(gbe_dev, i);
	}
	spin_unlock_bh(&gbe_dev->hw_stats_lock);

	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

free_sec_ports:
	free_secondary_ports(gbe_dev);
	return ret;
}

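/* Attach a netcp network interface to this instance: allocate the
 * per-interface and slave state, parse the interface's device tree node
 * and register the keystone ethtool ops.
 */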
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
				       sizeof(*gbe_intf->slave),
				       GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}

static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}

static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;

	del_timer_sync(&gbe_dev->timer);
	cpts_release(gbe_dev->cpts);
	cpsw_ale_stop(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(gbe_dev->dev,
			  "unreleased ethss interfaces present\n");

	return 0;
}

static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret)
		return ret;

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <[email protected]>");