spectrum.c

// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <linux/ptp_classify.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
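
/* Note: the MLXSW_SP*_FWREV_* constants and mlxsw_sp*_fw_rev structures
 * below pin a specific firmware release per ASIC generation. They are used
 * elsewhere in the driver (not shown in this excerpt) to validate the
 * running firmware revision and to construct the names of the .mfa2
 * firmware files requested from userspace.
 */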
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};

static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
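
/* Illustrative example: for a control packet sent out of local port 5,
 * mlxsw_sp_txhdr_construct() below builds a Tx header with version=1,
 * ctl=MLXSW_TXHDR_ETH_CTL, proto=MLXSW_TXHDR_PROTO_ETH, swid=0,
 * control_tclass=1, port_mid=5 and type=MLXSW_TXHDR_TYPE_CONTROL; all
 * remaining fields stay zero from the initial memset().
 */
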
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
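
/* Note: mlxsw_sp_flow_counter_alloc() above clears the hardware counter
 * right after allocation, so callers always start counting from zero; if
 * the clear fails, the counter index is returned to the sub-pool.
 *
 * Usage sketch (hypothetical caller), based on the signatures above:
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	...
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	...
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */
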
void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}
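
/* The FID programmed above is max_fid + tx_info->local_port - 1, i.e. a
 * per-port value just past the device's regular FID range. This appears to
 * serve as a per-port "dummy" FID: the PTP packet is sent as a data packet
 * (so it can be timestamped on its way through the pipeline) while still
 * being steered out of the intended egress port.
 */
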
static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
	unsigned int type;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	type = ptp_classify_raw(skb);
	return !!ptp_parse_header(skb, type);
}

static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
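
/* Note: spms_pl above is heap-allocated rather than placed on the stack,
 * unlike most register payloads in this file, presumably because
 * MLXSW_REG_SPMS_LEN is too large for a comfortable stack frame. The
 * SPVMLR and SPVM payloads below follow the same pattern.
 */
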
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
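
/* The MTU written to PMTU above counts the Ethernet header and the
 * software Tx header as well, which is why mlxsw_sp_port_mtu_set() adds
 * MLXSW_TXHDR_LEN + ETH_HLEN on top of the L3 MTU requested by the stack
 * before checking it against the port's maximum.
 */
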
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
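
/* Rollback note: if re-enabling untagged admission fails after a PVID
 * change above, __mlxsw_sp_port_pvid_set() is replayed with the previously
 * cached mlxsw_sp_port->pvid, keeping the hardware and the software state
 * consistent. Setting vid to 0 instead disables untagged admission, since
 * VLAN 0 is reserved in this driver.
 */
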
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
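
/* Transmit path note: skb->len is sampled before mlxsw_core_skb_transmit()
 * because ownership of the skb passes to the core on success and it may no
 * longer be safe to dereference afterwards; MLXSW_TXHDR_LEN is subtracted
 * since, as noted above, the Tx header is consumed by the device and never
 * reaches the wire.
 */
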
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
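
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop above
 * re-reads each CPU's counters until it observes a snapshot that was not
 * concurrently modified, so the 64-bit values stay consistent on 32-bit
 * machines. tx_dropped is a plain u32 bumped with this_cpu_inc() and is
 * therefore summed outside the seqcount protection, as the inline comment
 * notes.
 */
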
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
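
/* A single SPVM write carries at most MLXSW_REG_SPVM_REC_MAX_COUNT VLAN
 * records, so mlxsw_sp_port_vlan_set() above walks [vid_begin, vid_end] in
 * chunks of that size and issues one register write per chunk. An error in
 * any chunk aborts the walk, leaving earlier chunks applied.
 */
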
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
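
/* Create and destroy mirror each other: creation programs the VLAN into
 * hardware before allocating the tracking structure (and rolls the hardware
 * entry back on allocation failure), while destruction first detaches the
 * VLAN from its bridge port or router FID via mlxsw_sp_port_vlan_cleanup()
 * and only then removes the hardware VLAN entry.
 */
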
  925. static int mlxsw_sp_port_add_vid(struct net_device *dev,
  926. __be16 __always_unused proto, u16 vid)
  927. {
  928. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  929. /* VLAN 0 is added to HW filter when device goes up, but it is
  930. * reserved in our case, so simply return.
  931. */
  932. if (!vid)
  933. return 0;
  934. return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
  935. }
  936. static int mlxsw_sp_port_kill_vid(struct net_device *dev,
  937. __be16 __always_unused proto, u16 vid)
  938. {
  939. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  940. struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  941. /* VLAN 0 is removed from HW filter when device goes down, but
  942. * it is reserved in our case, so simply return.
  943. */
  944. if (!vid)
  945. return 0;
  946. mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  947. if (!mlxsw_sp_port_vlan)
  948. return 0;
  949. mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
  950. return 0;
  951. }
  952. static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
  953. struct flow_block_offload *f)
  954. {
  955. switch (f->binder_type) {
  956. case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
  957. return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
  958. case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
  959. return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
  960. case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
  961. return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
  962. case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
  963. return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
  964. default:
  965. return -EOPNOTSUPP;
  966. }
  967. }
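/* ndo_setup_tc entry point. Flow blocks and the offloadable qdiscs
 * (RED, PRIO, ETS, TBF, FIFO) are dispatched to dedicated handlers;
 * anything else is refused with -EOPNOTSUPP and stays in software.
 */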
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
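/* Toggle a single feature, but only when the requested state differs
 * from the current one: (wanted_features ^ dev->features) masks out
 * bits that do not actually change, so a handler never runs twice for
 * the same state.
 */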
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
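/* Default egress scheduling hierarchy: unicast traffic uses TCs 0-7 and
 * multicast uses TCs 8-15 (the i + 8 elements below); TCs i and i + 8
 * both feed subgroup i, and all subgroups feed group 0.
 */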
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it. Note that this disables ptps (PTP shaper), but that is
	 * intended for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
						    module, &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}
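/* Create a single front-panel port: map it to its module, register the
 * core (devlink) port, allocate the netdev and initialize buffers, ETS,
 * DCB, FIDs and qdiscs before registering the netdev. Each error label
 * below unwinds exactly the steps that succeeded, in reverse order.
 */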
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false so that the local port
	 * treats only packets with an 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
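/* Teardown mirrors mlxsw_sp_port_create() in reverse: the netdev is
 * unregistered first (stopping traffic via ndo_stop) and the module is
 * unmapped last.
 */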
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
				mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

static bool mlxsw_sp_local_port_valid(u16 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;

	return mlxsw_sp->ports[local_port] != NULL;
}

static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}

struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};
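/* PMLPE (port mapping change) events are queued from trap context and
 * processed here in a workqueue, where it is safe to take the devlink
 * lock and create ports.
 */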
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}

static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}
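/* Drain the mapping event queue once the work item has been cancelled.
 * The caller must guarantee that no new events can be generated at this
 * point, e.g. by first disabling event generation through PMECR.
 */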
static void
__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;

	events = &mlxsw_sp->port_mapping_events;

	/* Caller needs to make sure that no new event is going to appear. */
	cancel_work_sync(&events->work);
	list_for_each_entry_safe(event, next_event, &events->queue, list) {
		list_del(&event->list);
		kfree(event);
	}
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

static void
mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
			       bool (*selector)(void *priv, u16 local_port),
			       void *priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
	int i;

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}

static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
		if (err)
			goto err_port_module_info_get;
	}
	return 0;

err_port_module_info_get:
	kfree(mlxsw_sp->port_mapping);
	return err;
}

static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
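/* Create the ports resulting from a split: the base port's mapping is
 * divided by the requested count, and each child port's lane offset is
 * advanced by the (already divided) split width.
 */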
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 unsigned int count,
					 const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count; i++) {
		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		port_mapping = &mlxsw_sp->port_mapping[local_port];
		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
			continue;
		mlxsw_sp_port_create(mlxsw_sp, local_port,
				     false, port_mapping);
	}
}

static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}

static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}
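/* PUDE (port up/down) events drive the netdev carrier state. On link
 * down the cached per-TC backlog counters are zeroed, so that stale
 * values are not reported for queues the hardware has since flushed.
 */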
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u16 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
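/* Listener convenience macros. The _MARK variants set
 * skb->offload_fwd_mark so the bridge does not forward again packets
 * the ASIC already forwarded; L3_MARK additionally sets
 * offload_l3_fwd_mark for packets that were already routed in hardware.
 */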
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
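/* Rate-limit traps towards the CPU. Only the trap groups listed in the
 * switch statement below are given a policer here; the remaining groups
 * are expected to be configured elsewhere in the driver.
 */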
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
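/* LAG hashing considers all supported L2-L4 header fields, seeded with
 * a hash of the switch base MAC so that different systems are likely to
 * spread flows across LAG members differently.
 */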
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u16 max_lag;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
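/* Packet sampling triggers are keyed by {type, local_port} and kept in
 * a refcounted rhashtable node, so an identical trigger installed more
 * than once shares a single parameters entry.
 */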
  2354. struct mlxsw_sp_sample_trigger_node {
  2355. struct mlxsw_sp_sample_trigger trigger;
  2356. struct mlxsw_sp_sample_params params;
  2357. struct rhash_head ht_node;
  2358. struct rcu_head rcu;
  2359. refcount_t refcount;
  2360. };
  2361. static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
  2362. .key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
  2363. .head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
  2364. .key_len = sizeof(struct mlxsw_sp_sample_trigger),
  2365. .automatic_shrinking = true,
  2366. };
  2367. static void
  2368. mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
  2369. const struct mlxsw_sp_sample_trigger *trigger)
  2370. {
  2371. memset(key, 0, sizeof(*key));
  2372. key->type = trigger->type;
  2373. key->local_port = trigger->local_port;
  2374. }
  2375. /* RCU read lock must be held */
  2376. struct mlxsw_sp_sample_params *
  2377. mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
  2378. const struct mlxsw_sp_sample_trigger *trigger)
  2379. {
  2380. struct mlxsw_sp_sample_trigger_node *trigger_node;
  2381. struct mlxsw_sp_sample_trigger key;
  2382. mlxsw_sp_sample_trigger_key_init(&key, trigger);
  2383. trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
  2384. mlxsw_sp_sample_trigger_ht_params);
  2385. if (!trigger_node)
  2386. return NULL;
  2387. return &trigger_node->params;
  2388. }
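/* Minimal caller sketch (hypothetical, not a real call site): the returned
 * params pointer is only valid inside the RCU read-side critical section,
 * since the node is freed via kfree_rcu() when the trigger is unset.
 *
 *	rcu_read_lock();
 *	params = mlxsw_sp_sample_trigger_params_lookup(mlxsw_sp, trigger);
 *	if (params)
 *		rate = params->rate;
 *	rcu_read_unlock();
 */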
static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}

static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
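/* Set/unset below are refcounted per trigger key ({type, local_port}). A
 * repeated set on a trigger bound to a specific port is rejected, since a
 * port can only be sampled once. For shared triggers, a repeated set must
 * carry identical parameters and only bumps the refcount; the node is
 * destroyed once the last unset drops the refcount to zero.
 */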
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}

void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
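/* 4789 is the IANA-assigned VXLAN UDP destination port. The increased
 * parsing depth is presumably what lets the parser reach inner headers of
 * encapsulated packets; users request it via mlxsw_sp_parsing_depth_inc()
 * further below.
 */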
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}

static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}

struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};

static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}

static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}

int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}

void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}
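/* Usage sketch (hypothetical caller): every _kvdl_index_get() must be
 * balanced by a _put() on the same address, so the KVDL entry holding the
 * IPv6 address is shared between users and freed with the last reference.
 *
 *	err = mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &addr6, &kvdl_index);
 *	if (err)
 *		return err;
 *	... program kvdl_index into the relevant register ...
 *	mlxsw_sp_ipv6_addr_put(mlxsw_sp, &addr6);
 */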
static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
			      &mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		return err;

	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
	return 0;
}

static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
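/* Device initialization is strictly ordered: each subsystem may depend on
 * those brought up before it, and the error path unwinds in exact reverse
 * order through the goto labels at the bottom. mlxsw_sp_fini() further down
 * mirrors the same sequence in reverse.
 */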
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);
	mlxsw_core_emad_string_tlv_enable(mlxsw_core);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}

static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
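/* The config profiles below are handed to the core driver, which programs
 * them into the device through the CONFIG_PROFILE command mailbox. Each
 * used_* flag marks the corresponding field as valid; fields whose flag is
 * not set are left at the device default.
 */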
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.used_kvd_sizes = 1,
	.kvd_hash_single_parts = 59,
	.kvd_hash_double_parts = 41,
	.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type = 1,
	.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
/* Reduce the number of LAGs from full capacity (256) to the maximum
 * supported by Spectrum-2/3, to avoid a regression in the number of free
 * entries in the PGT table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag = 1,
	.max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type = 1,
	.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}

static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}

static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				      kvd_size, MLXSW_SP_RESOURCE_KVD,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &kvd_size_params);
}

static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
				      max_span, MLXSW_SP_RESOURCE_SPAN,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &span_size_params);
}

static int
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u8 max_rif_mac_profiles;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
		max_rif_mac_profiles = 1;
	else
		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
							  MAX_RIF_MAC_PROFILES);
	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
					  max_rif_mac_profiles, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink,
				      "rif_mac_profiles",
				      max_rif_mac_profiles,
				      MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, "rifs", max_rifs,
				      MLXSW_SP_RESOURCE_RIFS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	return 0;

err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	return 0;

err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD after the linear part is
	 * carved out. It is split into single and double sizes according to
	 * the parts ratio from the profile. Both sizes must be multiples of
	 * the granularity from the profile. In case the user provided the
	 * sizes, they are obtained via devlink.
	 */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check that the results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
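/* Worked example of the default split (the KVD size here is hypothetical;
 * the real value is read from the device resources, and the granularity is
 * assumed to be 128 entries):
 *
 *	kvd_size  = 524288, kvd_linear_size = 98304
 *	hash part = 524288 - 98304 = 425984
 *	double    = 425984 * 41 / (41 + 59) = 174653
 *	            rounded down to a multiple of 128 -> 174592
 *	single    = 524288 - 174592 - 98304 = 251392
 */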
static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
	return 0;
}

static int
mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
					     struct devlink_param_gset_ctx *ctx)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
}

static const struct devlink_param mlxsw_sp2_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
			     "acl_region_rehash_interval",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlxsw_sp_params_acl_region_rehash_intrvl_get,
			     mlxsw_sp_params_acl_region_rehash_intrvl_set,
			     NULL),
};
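/* From user space the parameter is driven through the devlink CLI, e.g.
 * (the PCI address is a placeholder):
 *
 *	devlink dev param set pci/0000:01:00.0 \
 *		name acl_region_rehash_interval value 3000 cmode runtime
 */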
static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	union devlink_param_value value;
	int err;

	err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
				      ARRAY_SIZE(mlxsw_sp2_devlink_params));
	if (err)
		return err;

	value.vu32 = 0;
	devlink_param_driverinit_value_set(devlink,
					   MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
					   value);
	return 0;
}

static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
{
	devlink_params_unregister(priv_to_devlink(mlxsw_core),
				  mlxsw_sp2_devlink_params,
				  ARRAY_SIZE(mlxsw_sp2_devlink_params));
}

static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp1_fw_rev,
	.fw_filename = MLXSW_SP1_FW_FILENAME,
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2 = false,
};

static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp2_fw_rev,
	.fw_filename = MLXSW_SP2_FW_FILENAME,
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2 = true,
};

static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp3_fw_rev,
	.fw_filename = MLXSW_SP3_FW_FILENAME,
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2 = true,
};

static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind = mlxsw_sp4_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp4_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.params_register = mlxsw_sp2_params_register,
	.params_unregister = mlxsw_sp2_params_unregister,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2 = true,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();

	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
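/* Usage sketch (hypothetical caller): the hold/put pair keeps the port's
 * netdevice alive outside the RCU read-side section, e.g. across code that
 * may sleep:
 *
 *	mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
 *	if (!mlxsw_sp_port)
 *		return;
 *	... code that may sleep ...
 *	mlxsw_sp_port_dev_put(mlxsw_sp_port);
 */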
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
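/* The two functions above implement a mutex-protected refcount: only the
 * 0 -> 1 transition writes the increased depth to the MPRS register and only
 * the 1 -> 0 transition restores the default, so nested users are cheap and
 * the register is not rewritten on every call.
 */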
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u16 max_lag;
	int err, i;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;

	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if exists */
	err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
					     lag_dev, extack);
	if (err)
		goto err_router_join;

	return 0;

err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
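
/* Reverse of mlxsw_sp_port_lag_join(): detach the port from the hardware
 * LAG, flush its VLANs, pull the LAG and its uppers out of any bridge and
 * destroy the LAG when the last member leaves.
 */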
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
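
/* Collection (Rx) is controlled through the SLCOR register and
 * distribution (Tx) through the SLDR port list; the two must stay in
 * sync, so each helper below rolls back its first step if the second
 * one fails.
 */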
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}
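
/* Set the STP state of every possible VLAN on the port to forwarding or
 * discarding. The SPMS payload carries an entry per VID, hence the heap
 * allocation.
 */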
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
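
/* Prepare the port for enslavement to OVS: no bridge is offloaded, so
 * switch to virtual port mode, force forwarding, make the port a member
 * of VIDs 1-4094 and disable learning across the VID range. A partial
 * failure unwinds the VIDs configured so far.
 */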
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}
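
/* In a VLAN-aware bridge, each VxLAN device is mapped to the VLAN that is
 * PVID and egress-untagged on its bridge port, so no two VxLAN devices
 * may share such a VLAN.
 */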
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}
	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}
	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}
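
/* NETDEV_PRECHANGEUPPER is used to veto unsupported topologies before the
 * networking stack commits to them; NETDEV_CHANGEUPPER then performs the
 * actual join or leave of the upper device in hardware.
 */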
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
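
/* Reflect the bonding/team driver's tx_enabled state of a LAG member into
 * the port's collector / distributor state.
 */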
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
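
/* Handle upper device events for a VLAN device configured on top of a
 * physical port; only bridge, macvlan and L3 master (VRF) uppers are
 * supported. For a VLAN on a LAG, the event is replicated to each member
 * port by the helper that follows.
 */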
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);
	upper_dev = info->upper_dev;

	if (!netif_is_l3_master(upper_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
		return -EOPNOTSUPP;
	}

	return 0;
}
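
/* VxLAN device events: NETDEV_CHANGEUPPER handles enslavement to and
 * removal from an offloaded bridge, while NETDEV_PRE_UP / NETDEV_DOWN map
 * and unmap the VNI when an already enslaved device is brought up or
 * down.
 */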
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
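
/* Top-level netdevice notifier: invalidate the SPAN entry of a netdev
 * that is being unregistered, re-evaluate mirroring after any topology
 * change and dispatch the event according to the device type.
 */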
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
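
/* Each ASIC generation (Spectrum-1 through Spectrum-4) registers its own
 * mlxsw core driver and PCI driver; any failure unwinds all previously
 * registered ones in reverse order.
 */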
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);