leftover.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/devlink.c - Network physical/parent device Netlink interface
 *
 * Heavily inspired by net/wireless/
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <[email protected]>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/u64_stats_sync.h>
#include <linux/timekeeping.h>
#include <rdma/ib_verbs.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/devlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/devlink.h>

#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
        (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)

struct devlink_dev_stats {
        u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
        u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
};

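/* Illustrative note (an assumption, not stated in this excerpt): the flat
 * reload_stats[] array is sized as limits x actions, so one plausible way a
 * (limit, action) pair maps to a slot is:
 *
 *        idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
 *        stats->reload_stats[idx]++;
 */
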
struct devlink {
        u32 index;
        struct list_head port_list;
        struct list_head rate_list;
        struct list_head sb_list;
        struct list_head dpipe_table_list;
        struct list_head resource_list;
        struct list_head param_list;
        struct list_head region_list;
        struct list_head reporter_list;
        struct mutex reporters_lock; /* protects reporter_list */
        struct devlink_dpipe_headers *dpipe_headers;
        struct list_head trap_list;
        struct list_head trap_group_list;
        struct list_head trap_policer_list;
        struct list_head linecard_list;
        struct mutex linecards_lock; /* protects linecard_list */
        const struct devlink_ops *ops;
        u64 features;
        struct xarray snapshot_ids;
        struct devlink_dev_stats stats;
        struct device *dev;
        possible_net_t _net;
        /* Serializes access to devlink instance specific objects such as
         * port, sb, dpipe, resource, params, region, traps and more.
         */
        struct mutex lock;
        struct lock_class_key lock_key;
        u8 reload_failed:1;
        refcount_t refcount;
        struct completion comp;
        struct rcu_head rcu;
        char priv[] __aligned(NETDEV_ALIGN);
};

struct devlink_linecard_ops;
struct devlink_linecard_type;

struct devlink_linecard {
        struct list_head list;
        struct devlink *devlink;
        unsigned int index;
        refcount_t refcount;
        const struct devlink_linecard_ops *ops;
        void *priv;
        enum devlink_linecard_state state;
        struct mutex state_lock; /* Protects state */
        const char *type;
        struct devlink_linecard_type *types;
        unsigned int types_count;
        struct devlink *nested_devlink;
};

/**
 * struct devlink_resource - devlink resource
 * @name: name of the resource
 * @id: id, per devlink instance
 * @size: size of the resource
 * @size_new: updated size of the resource; a reload is needed for it to
 *            take effect
 * @size_valid: true if the total size of the resource, including its
 *              children, is valid
 * @parent: parent resource
 * @size_params: size parameters
 * @list: parent list
 * @resource_list: list of child resources
 * @occ_get: occupancy getter callback
 * @occ_get_priv: occupancy getter callback priv
 */
struct devlink_resource {
        const char *name;
        u64 id;
        u64 size;
        u64 size_new;
        bool size_valid;
        struct devlink_resource *parent;
        struct devlink_resource_size_params size_params;
        struct list_head list;
        struct list_head resource_list;
        devlink_resource_occ_get_t *occ_get;
        void *occ_get_priv;
};

void *devlink_priv(struct devlink *devlink)
{
        return &devlink->priv;
}
EXPORT_SYMBOL_GPL(devlink_priv);

struct devlink *priv_to_devlink(void *priv)
{
        return container_of(priv, struct devlink, priv);
}
EXPORT_SYMBOL_GPL(priv_to_devlink);

struct device *devlink_to_dev(const struct devlink *devlink)
{
        return devlink->dev;
}
EXPORT_SYMBOL_GPL(devlink_to_dev);

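/* Illustrative usage sketch (not part of this file): a driver embeds its
 * private state in the devlink instance allocated by devlink_alloc() and
 * converts between the two with the accessors above. The my_drv_* names
 * below are hypothetical.
 *
 *        struct my_drv_priv {
 *                struct my_drv_core *core;
 *        };
 *
 *        struct devlink *dl = devlink_alloc(&my_drv_devlink_ops,
 *                                           sizeof(struct my_drv_priv), dev);
 *        struct my_drv_priv *priv = devlink_priv(dl);
 *
 *        // later, given only priv, recover the devlink instance:
 *        struct devlink *dl2 = priv_to_devlink(priv);
 */
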
static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = {
        {
                .name = "destination mac",
                .id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
                .bitwidth = 48,
        },
};

struct devlink_dpipe_header devlink_dpipe_header_ethernet = {
        .name = "ethernet",
        .id = DEVLINK_DPIPE_HEADER_ETHERNET,
        .fields = devlink_dpipe_fields_ethernet,
        .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ethernet),
        .global = true,
};
EXPORT_SYMBOL_GPL(devlink_dpipe_header_ethernet);

static struct devlink_dpipe_field devlink_dpipe_fields_ipv4[] = {
        {
                .name = "destination ip",
                .id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP,
                .bitwidth = 32,
        },
};

struct devlink_dpipe_header devlink_dpipe_header_ipv4 = {
        .name = "ipv4",
        .id = DEVLINK_DPIPE_HEADER_IPV4,
        .fields = devlink_dpipe_fields_ipv4,
        .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv4),
        .global = true,
};
EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv4);

static struct devlink_dpipe_field devlink_dpipe_fields_ipv6[] = {
        {
                .name = "destination ip",
                .id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP,
                .bitwidth = 128,
        },
};

struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
        .name = "ipv6",
        .id = DEVLINK_DPIPE_HEADER_IPV6,
        .fields = devlink_dpipe_fields_ipv6,
        .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv6),
        .global = true,
};
EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv6);

  176. EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
  177. EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
  178. EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
  179. static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
  180. [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY },
  181. [DEVLINK_PORT_FN_ATTR_STATE] =
  182. NLA_POLICY_RANGE(NLA_U8, DEVLINK_PORT_FN_STATE_INACTIVE,
  183. DEVLINK_PORT_FN_STATE_ACTIVE),
  184. };
  185. static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
  186. [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
  187. };
  188. static DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
  189. #define DEVLINK_REGISTERED XA_MARK_1
  190. #define DEVLINK_UNREGISTERING XA_MARK_2
191. /* devlink instances are open to access from user space after the
192. * devlink_register() call. Such a logical barrier allows us to have certain
193. * expectations related to locking.
194. *
195. * Before *_register() - we are in the initialization stage and no parallel
196. * access to the devlink instance is possible. All drivers perform that phase
197. * by implicitly holding the device_lock.
198. *
199. * After *_register() - users and the driver can access the devlink instance
200. * at the same time.
  201. */
  202. #define ASSERT_DEVLINK_REGISTERED(d) \
  203. WARN_ON_ONCE(!xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
  204. #define ASSERT_DEVLINK_NOT_REGISTERED(d) \
  205. WARN_ON_ONCE(xa_get_mark(&devlinks, (d)->index, DEVLINK_REGISTERED))
  206. struct net *devlink_net(const struct devlink *devlink)
  207. {
  208. return read_pnet(&devlink->_net);
  209. }
  210. EXPORT_SYMBOL_GPL(devlink_net);
  211. static void __devlink_put_rcu(struct rcu_head *head)
  212. {
  213. struct devlink *devlink = container_of(head, struct devlink, rcu);
  214. complete(&devlink->comp);
  215. }
  216. void devlink_put(struct devlink *devlink)
  217. {
  218. if (refcount_dec_and_test(&devlink->refcount))
219. /* Make sure the unregister operation that may await the completion
220. * is unblocked only after all users are gone, i.e. after the end of
221. * the RCU grace period.
  222. */
  223. call_rcu(&devlink->rcu, __devlink_put_rcu);
  224. }
  225. struct devlink *__must_check devlink_try_get(struct devlink *devlink)
  226. {
  227. if (refcount_inc_not_zero(&devlink->refcount))
  228. return devlink;
  229. return NULL;
  230. }
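/*
 * Illustrative sketch (not part of the original file): the reference pattern
 * the two helpers above are meant for. A caller that found a devlink pointer
 * by some unlocked means must first turn it into a real reference with
 * devlink_try_get() and drop it with devlink_put() when done.
 */
static void devlink_ref_pattern_sketch(struct devlink *devlink)
{
        devlink = devlink_try_get(devlink);
        if (!devlink)
                return;         /* instance is already going away */

        /* ... safely dereference devlink here ... */

        devlink_put(devlink);   /* may trigger the RCU-deferred completion */
}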
  231. void devl_assert_locked(struct devlink *devlink)
  232. {
  233. lockdep_assert_held(&devlink->lock);
  234. }
  235. EXPORT_SYMBOL_GPL(devl_assert_locked);
  236. #ifdef CONFIG_LOCKDEP
  237. /* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */
  238. bool devl_lock_is_held(struct devlink *devlink)
  239. {
  240. return lockdep_is_held(&devlink->lock);
  241. }
  242. EXPORT_SYMBOL_GPL(devl_lock_is_held);
  243. #endif
  244. void devl_lock(struct devlink *devlink)
  245. {
  246. mutex_lock(&devlink->lock);
  247. }
  248. EXPORT_SYMBOL_GPL(devl_lock);
  249. int devl_trylock(struct devlink *devlink)
  250. {
  251. return mutex_trylock(&devlink->lock);
  252. }
  253. EXPORT_SYMBOL_GPL(devl_trylock);
  254. void devl_unlock(struct devlink *devlink)
  255. {
  256. mutex_unlock(&devlink->lock);
  257. }
  258. EXPORT_SYMBOL_GPL(devl_unlock);
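/*
 * Illustrative sketch (not part of the original file): a driver-side critical
 * section built from the helpers above. devl_assert_locked() lets helpers
 * that expect the instance lock document and verify that expectation.
 */
static void devl_locked_helper_sketch(struct devlink *devlink)
{
        devl_assert_locked(devlink);
        /* ... touch ports, regions, traps, ... */
}

static void devl_critical_section_sketch(struct devlink *devlink)
{
        devl_lock(devlink);
        devl_locked_helper_sketch(devlink);
        devl_unlock(devlink);
}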
  259. static struct devlink *
  260. devlinks_xa_find_get(struct net *net, unsigned long *indexp, xa_mark_t filter,
  261. void * (*xa_find_fn)(struct xarray *, unsigned long *,
  262. unsigned long, xa_mark_t))
  263. {
  264. struct devlink *devlink;
  265. rcu_read_lock();
  266. retry:
  267. devlink = xa_find_fn(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED);
  268. if (!devlink)
  269. goto unlock;
  270. /* In case devlink_unregister() was already called and "unregistering"
271. * mark was set, do not allow taking a devlink reference here.
272. * This prevents a live-lock of the devlink_unregister() wait for completion.
  273. */
  274. if (xa_get_mark(&devlinks, *indexp, DEVLINK_UNREGISTERING))
  275. goto retry;
276. /* For a possible retry, xa_find_after() should always be used */
  277. xa_find_fn = xa_find_after;
  278. if (!devlink_try_get(devlink))
  279. goto retry;
  280. if (!net_eq(devlink_net(devlink), net)) {
  281. devlink_put(devlink);
  282. goto retry;
  283. }
  284. unlock:
  285. rcu_read_unlock();
  286. return devlink;
  287. }
  288. static struct devlink *devlinks_xa_find_get_first(struct net *net,
  289. unsigned long *indexp,
  290. xa_mark_t filter)
  291. {
  292. return devlinks_xa_find_get(net, indexp, filter, xa_find);
  293. }
  294. static struct devlink *devlinks_xa_find_get_next(struct net *net,
  295. unsigned long *indexp,
  296. xa_mark_t filter)
  297. {
  298. return devlinks_xa_find_get(net, indexp, filter, xa_find_after);
  299. }
300. /* Iterate over the devlink pointers to which a reference could be taken.
301. * devlink_put() needs to be called for each iterated devlink pointer
302. * in the loop body in order to release the reference.
  303. */
  304. #define devlinks_xa_for_each_get(net, index, devlink, filter) \
  305. for (index = 0, \
  306. devlink = devlinks_xa_find_get_first(net, &index, filter); \
  307. devlink; devlink = devlinks_xa_find_get_next(net, &index, filter))
  308. #define devlinks_xa_for_each_registered_get(net, index, devlink) \
  309. devlinks_xa_for_each_get(net, index, devlink, DEVLINK_REGISTERED)
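/*
 * Illustrative sketch (not part of the original file): the iteration contract
 * of the macro above - every devlink handed to the loop body carries a
 * reference that the body must release with devlink_put().
 */
static void devlinks_iter_sketch(struct net *net)
{
        struct devlink *devlink;
        unsigned long index;

        devlinks_xa_for_each_registered_get(net, index, devlink) {
                /* ... inspect devlink ... */
                devlink_put(devlink);
        }
}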
  310. static struct devlink *devlink_get_from_attrs(struct net *net,
  311. struct nlattr **attrs)
  312. {
  313. struct devlink *devlink;
  314. unsigned long index;
  315. char *busname;
  316. char *devname;
  317. if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
  318. return ERR_PTR(-EINVAL);
  319. busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
  320. devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
  321. devlinks_xa_for_each_registered_get(net, index, devlink) {
  322. if (strcmp(devlink->dev->bus->name, busname) == 0 &&
  323. strcmp(dev_name(devlink->dev), devname) == 0)
  324. return devlink;
  325. devlink_put(devlink);
  326. }
  327. return ERR_PTR(-ENODEV);
  328. }
  329. #define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port) \
  330. WARN_ON_ONCE(!(devlink_port)->registered)
  331. #define ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port) \
  332. WARN_ON_ONCE((devlink_port)->registered)
  333. #define ASSERT_DEVLINK_PORT_INITIALIZED(devlink_port) \
  334. WARN_ON_ONCE(!(devlink_port)->initialized)
  335. static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
  336. unsigned int port_index)
  337. {
  338. struct devlink_port *devlink_port;
  339. list_for_each_entry(devlink_port, &devlink->port_list, list) {
  340. if (devlink_port->index == port_index)
  341. return devlink_port;
  342. }
  343. return NULL;
  344. }
  345. static bool devlink_port_index_exists(struct devlink *devlink,
  346. unsigned int port_index)
  347. {
  348. return devlink_port_get_by_index(devlink, port_index);
  349. }
  350. static struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
  351. struct nlattr **attrs)
  352. {
  353. if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
  354. u32 port_index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
  355. struct devlink_port *devlink_port;
  356. devlink_port = devlink_port_get_by_index(devlink, port_index);
  357. if (!devlink_port)
  358. return ERR_PTR(-ENODEV);
  359. return devlink_port;
  360. }
  361. return ERR_PTR(-EINVAL);
  362. }
  363. static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
  364. struct genl_info *info)
  365. {
  366. return devlink_port_get_from_attrs(devlink, info->attrs);
  367. }
  368. static inline bool
  369. devlink_rate_is_leaf(struct devlink_rate *devlink_rate)
  370. {
  371. return devlink_rate->type == DEVLINK_RATE_TYPE_LEAF;
  372. }
  373. static inline bool
  374. devlink_rate_is_node(struct devlink_rate *devlink_rate)
  375. {
  376. return devlink_rate->type == DEVLINK_RATE_TYPE_NODE;
  377. }
  378. static struct devlink_rate *
  379. devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info)
  380. {
  381. struct devlink_rate *devlink_rate;
  382. struct devlink_port *devlink_port;
  383. devlink_port = devlink_port_get_from_attrs(devlink, info->attrs);
  384. if (IS_ERR(devlink_port))
  385. return ERR_CAST(devlink_port);
  386. devlink_rate = devlink_port->devlink_rate;
  387. return devlink_rate ?: ERR_PTR(-ENODEV);
  388. }
  389. static struct devlink_rate *
  390. devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name)
  391. {
392. struct devlink_rate *devlink_rate;
  393. list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
  394. if (devlink_rate_is_node(devlink_rate) &&
  395. !strcmp(node_name, devlink_rate->name))
  396. return devlink_rate;
  397. }
  398. return ERR_PTR(-ENODEV);
  399. }
  400. static struct devlink_rate *
  401. devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
  402. {
  403. const char *rate_node_name;
  404. size_t len;
  405. if (!attrs[DEVLINK_ATTR_RATE_NODE_NAME])
  406. return ERR_PTR(-EINVAL);
  407. rate_node_name = nla_data(attrs[DEVLINK_ATTR_RATE_NODE_NAME]);
  408. len = strlen(rate_node_name);
409. /* Name cannot be empty or a pure decimal number */
  410. if (!len || strspn(rate_node_name, "0123456789") == len)
  411. return ERR_PTR(-EINVAL);
  412. return devlink_rate_node_get_by_name(devlink, rate_node_name);
  413. }
  414. static struct devlink_rate *
  415. devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
  416. {
  417. return devlink_rate_node_get_from_attrs(devlink, info->attrs);
  418. }
  419. static struct devlink_rate *
  420. devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
  421. {
  422. struct nlattr **attrs = info->attrs;
  423. if (attrs[DEVLINK_ATTR_PORT_INDEX])
  424. return devlink_rate_leaf_get_from_info(devlink, info);
  425. else if (attrs[DEVLINK_ATTR_RATE_NODE_NAME])
  426. return devlink_rate_node_get_from_info(devlink, info);
  427. else
  428. return ERR_PTR(-EINVAL);
  429. }
  430. static struct devlink_linecard *
  431. devlink_linecard_get_by_index(struct devlink *devlink,
  432. unsigned int linecard_index)
  433. {
  434. struct devlink_linecard *devlink_linecard;
  435. list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) {
  436. if (devlink_linecard->index == linecard_index)
  437. return devlink_linecard;
  438. }
  439. return NULL;
  440. }
  441. static bool devlink_linecard_index_exists(struct devlink *devlink,
  442. unsigned int linecard_index)
  443. {
  444. return devlink_linecard_get_by_index(devlink, linecard_index);
  445. }
  446. static struct devlink_linecard *
  447. devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
  448. {
  449. if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) {
  450. u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]);
  451. struct devlink_linecard *linecard;
  452. mutex_lock(&devlink->linecards_lock);
  453. linecard = devlink_linecard_get_by_index(devlink, linecard_index);
  454. if (linecard)
  455. refcount_inc(&linecard->refcount);
  456. mutex_unlock(&devlink->linecards_lock);
  457. if (!linecard)
  458. return ERR_PTR(-ENODEV);
  459. return linecard;
  460. }
  461. return ERR_PTR(-EINVAL);
  462. }
  463. static struct devlink_linecard *
  464. devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
  465. {
  466. return devlink_linecard_get_from_attrs(devlink, info->attrs);
  467. }
  468. static void devlink_linecard_put(struct devlink_linecard *linecard)
  469. {
  470. if (refcount_dec_and_test(&linecard->refcount)) {
  471. mutex_destroy(&linecard->state_lock);
  472. kfree(linecard);
  473. }
  474. }
  475. struct devlink_sb {
  476. struct list_head list;
  477. unsigned int index;
  478. u32 size;
  479. u16 ingress_pools_count;
  480. u16 egress_pools_count;
  481. u16 ingress_tc_count;
  482. u16 egress_tc_count;
  483. };
  484. static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
  485. {
  486. return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
  487. }
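/*
 * Illustrative sketch (not part of the original file): the pool index space
 * of a shared buffer is flat across both directions, so the number of valid
 * pool indices is simply the sum computed above. The counts in the comment
 * are hypothetical.
 */
static bool devlink_sb_pool_index_ok_sketch(struct devlink_sb *devlink_sb,
                                            u16 pool_index)
{
        /* e.g. 4 ingress + 4 egress pools => valid indices are 0..7 */
        return pool_index < devlink_sb_pool_count(devlink_sb);
}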
  488. static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
  489. unsigned int sb_index)
  490. {
  491. struct devlink_sb *devlink_sb;
  492. list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
  493. if (devlink_sb->index == sb_index)
  494. return devlink_sb;
  495. }
  496. return NULL;
  497. }
  498. static bool devlink_sb_index_exists(struct devlink *devlink,
  499. unsigned int sb_index)
  500. {
  501. return devlink_sb_get_by_index(devlink, sb_index);
  502. }
  503. static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
  504. struct nlattr **attrs)
  505. {
  506. if (attrs[DEVLINK_ATTR_SB_INDEX]) {
  507. u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
  508. struct devlink_sb *devlink_sb;
  509. devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
  510. if (!devlink_sb)
  511. return ERR_PTR(-ENODEV);
  512. return devlink_sb;
  513. }
  514. return ERR_PTR(-EINVAL);
  515. }
  516. static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
  517. struct genl_info *info)
  518. {
  519. return devlink_sb_get_from_attrs(devlink, info->attrs);
  520. }
  521. static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
  522. struct nlattr **attrs,
  523. u16 *p_pool_index)
  524. {
  525. u16 val;
  526. if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
  527. return -EINVAL;
  528. val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
  529. if (val >= devlink_sb_pool_count(devlink_sb))
  530. return -EINVAL;
  531. *p_pool_index = val;
  532. return 0;
  533. }
  534. static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
  535. struct genl_info *info,
  536. u16 *p_pool_index)
  537. {
  538. return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
  539. p_pool_index);
  540. }
  541. static int
  542. devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
  543. enum devlink_sb_pool_type *p_pool_type)
  544. {
  545. u8 val;
  546. if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
  547. return -EINVAL;
  548. val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
  549. if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
  550. val != DEVLINK_SB_POOL_TYPE_EGRESS)
  551. return -EINVAL;
  552. *p_pool_type = val;
  553. return 0;
  554. }
  555. static int
  556. devlink_sb_pool_type_get_from_info(struct genl_info *info,
  557. enum devlink_sb_pool_type *p_pool_type)
  558. {
  559. return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
  560. }
  561. static int
  562. devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
  563. enum devlink_sb_threshold_type *p_th_type)
  564. {
  565. u8 val;
  566. if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
  567. return -EINVAL;
  568. val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
  569. if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
  570. val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
  571. return -EINVAL;
  572. *p_th_type = val;
  573. return 0;
  574. }
  575. static int
  576. devlink_sb_th_type_get_from_info(struct genl_info *info,
  577. enum devlink_sb_threshold_type *p_th_type)
  578. {
  579. return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
  580. }
  581. static int
  582. devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
  583. struct nlattr **attrs,
  584. enum devlink_sb_pool_type pool_type,
  585. u16 *p_tc_index)
  586. {
  587. u16 val;
  588. if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
  589. return -EINVAL;
  590. val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
  591. if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
  592. val >= devlink_sb->ingress_tc_count)
  593. return -EINVAL;
  594. if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
  595. val >= devlink_sb->egress_tc_count)
  596. return -EINVAL;
  597. *p_tc_index = val;
  598. return 0;
  599. }
  600. static int
  601. devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
  602. struct genl_info *info,
  603. enum devlink_sb_pool_type pool_type,
  604. u16 *p_tc_index)
  605. {
  606. return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
  607. pool_type, p_tc_index);
  608. }
  609. struct devlink_region {
  610. struct devlink *devlink;
  611. struct devlink_port *port;
  612. struct list_head list;
  613. union {
  614. const struct devlink_region_ops *ops;
  615. const struct devlink_port_region_ops *port_ops;
  616. };
  617. struct mutex snapshot_lock; /* protects snapshot_list,
  618. * max_snapshots and cur_snapshots
  619. * consistency.
  620. */
  621. struct list_head snapshot_list;
  622. u32 max_snapshots;
  623. u32 cur_snapshots;
  624. u64 size;
  625. };
  626. struct devlink_snapshot {
  627. struct list_head list;
  628. struct devlink_region *region;
  629. u8 *data;
  630. u32 id;
  631. };
  632. static struct devlink_region *
  633. devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
  634. {
  635. struct devlink_region *region;
  636. list_for_each_entry(region, &devlink->region_list, list)
  637. if (!strcmp(region->ops->name, region_name))
  638. return region;
  639. return NULL;
  640. }
  641. static struct devlink_region *
  642. devlink_port_region_get_by_name(struct devlink_port *port,
  643. const char *region_name)
  644. {
  645. struct devlink_region *region;
  646. list_for_each_entry(region, &port->region_list, list)
  647. if (!strcmp(region->ops->name, region_name))
  648. return region;
  649. return NULL;
  650. }
  651. static struct devlink_snapshot *
  652. devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
  653. {
  654. struct devlink_snapshot *snapshot;
  655. list_for_each_entry(snapshot, &region->snapshot_list, list)
  656. if (snapshot->id == id)
  657. return snapshot;
  658. return NULL;
  659. }
  660. #define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
  661. #define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
  662. #define DEVLINK_NL_FLAG_NEED_RATE BIT(2)
  663. #define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
  664. #define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
  665. static int devlink_nl_pre_doit(const struct genl_ops *ops,
  666. struct sk_buff *skb, struct genl_info *info)
  667. {
  668. struct devlink_linecard *linecard;
  669. struct devlink_port *devlink_port;
  670. struct devlink *devlink;
  671. int err;
  672. devlink = devlink_get_from_attrs(genl_info_net(info), info->attrs);
  673. if (IS_ERR(devlink))
  674. return PTR_ERR(devlink);
  675. devl_lock(devlink);
  676. info->user_ptr[0] = devlink;
  677. if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
  678. devlink_port = devlink_port_get_from_info(devlink, info);
  679. if (IS_ERR(devlink_port)) {
  680. err = PTR_ERR(devlink_port);
  681. goto unlock;
  682. }
  683. info->user_ptr[1] = devlink_port;
  684. } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
  685. devlink_port = devlink_port_get_from_info(devlink, info);
  686. if (!IS_ERR(devlink_port))
  687. info->user_ptr[1] = devlink_port;
  688. } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
  689. struct devlink_rate *devlink_rate;
  690. devlink_rate = devlink_rate_get_from_info(devlink, info);
  691. if (IS_ERR(devlink_rate)) {
  692. err = PTR_ERR(devlink_rate);
  693. goto unlock;
  694. }
  695. info->user_ptr[1] = devlink_rate;
  696. } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
  697. struct devlink_rate *rate_node;
  698. rate_node = devlink_rate_node_get_from_info(devlink, info);
  699. if (IS_ERR(rate_node)) {
  700. err = PTR_ERR(rate_node);
  701. goto unlock;
  702. }
  703. info->user_ptr[1] = rate_node;
  704. } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
  705. linecard = devlink_linecard_get_from_info(devlink, info);
  706. if (IS_ERR(linecard)) {
  707. err = PTR_ERR(linecard);
  708. goto unlock;
  709. }
  710. info->user_ptr[1] = linecard;
  711. }
  712. return 0;
  713. unlock:
  714. devl_unlock(devlink);
  715. devlink_put(devlink);
  716. return err;
  717. }
  718. static void devlink_nl_post_doit(const struct genl_ops *ops,
  719. struct sk_buff *skb, struct genl_info *info)
  720. {
  721. struct devlink_linecard *linecard;
  722. struct devlink *devlink;
  723. devlink = info->user_ptr[0];
  724. if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
  725. linecard = info->user_ptr[1];
  726. devlink_linecard_put(linecard);
  727. }
  728. devl_unlock(devlink);
  729. devlink_put(devlink);
  730. }
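/*
 * Illustrative sketch (not part of the original file): the contract the
 * pre/post doit handlers above establish for command handlers. With
 * DEVLINK_NL_FLAG_NEED_PORT set on the op, a doit callback runs with the
 * instance lock held and finds the resolved objects in info->user_ptr[].
 */
static int devlink_nl_cmd_port_doit_sketch(struct sk_buff *skb,
                                           struct genl_info *info)
{
        struct devlink *devlink = info->user_ptr[0];            /* always set */
        struct devlink_port *devlink_port = info->user_ptr[1];  /* NEED_PORT */

        devl_assert_locked(devlink);
        /* ... act on devlink_port ... */
        return 0;
}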
  731. static struct genl_family devlink_nl_family;
  732. enum devlink_multicast_groups {
  733. DEVLINK_MCGRP_CONFIG,
  734. };
  735. static const struct genl_multicast_group devlink_nl_mcgrps[] = {
  736. [DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
  737. };
  738. static int devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
  739. {
  740. if (nla_put_string(msg, DEVLINK_ATTR_BUS_NAME, devlink->dev->bus->name))
  741. return -EMSGSIZE;
  742. if (nla_put_string(msg, DEVLINK_ATTR_DEV_NAME, dev_name(devlink->dev)))
  743. return -EMSGSIZE;
  744. return 0;
  745. }
  746. static int devlink_nl_put_nested_handle(struct sk_buff *msg, struct devlink *devlink)
  747. {
  748. struct nlattr *nested_attr;
  749. nested_attr = nla_nest_start(msg, DEVLINK_ATTR_NESTED_DEVLINK);
  750. if (!nested_attr)
  751. return -EMSGSIZE;
  752. if (devlink_nl_put_handle(msg, devlink))
  753. goto nla_put_failure;
  754. nla_nest_end(msg, nested_attr);
  755. return 0;
  756. nla_put_failure:
  757. nla_nest_cancel(msg, nested_attr);
  758. return -EMSGSIZE;
  759. }
  760. struct devlink_reload_combination {
  761. enum devlink_reload_action action;
  762. enum devlink_reload_limit limit;
  763. };
  764. static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
  765. {
  766. /* can't reinitialize driver with no down time */
  767. .action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
  768. .limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
  769. },
  770. };
  771. static bool
  772. devlink_reload_combination_is_invalid(enum devlink_reload_action action,
  773. enum devlink_reload_limit limit)
  774. {
  775. int i;
  776. for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
  777. if (devlink_reload_invalid_combinations[i].action == action &&
  778. devlink_reload_invalid_combinations[i].limit == limit)
  779. return true;
  780. return false;
  781. }
  782. static bool
  783. devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
  784. {
  785. return test_bit(action, &devlink->ops->reload_actions);
  786. }
  787. static bool
  788. devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
  789. {
  790. return test_bit(limit, &devlink->ops->reload_limits);
  791. }
  792. static int devlink_reload_stat_put(struct sk_buff *msg,
  793. enum devlink_reload_limit limit, u32 value)
  794. {
  795. struct nlattr *reload_stats_entry;
  796. reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
  797. if (!reload_stats_entry)
  798. return -EMSGSIZE;
  799. if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
  800. nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
  801. goto nla_put_failure;
  802. nla_nest_end(msg, reload_stats_entry);
  803. return 0;
  804. nla_put_failure:
  805. nla_nest_cancel(msg, reload_stats_entry);
  806. return -EMSGSIZE;
  807. }
  808. static int devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
  809. {
  810. struct nlattr *reload_stats_attr, *act_info, *act_stats;
  811. int i, j, stat_idx;
  812. u32 value;
  813. if (!is_remote)
  814. reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
  815. else
  816. reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);
  817. if (!reload_stats_attr)
  818. return -EMSGSIZE;
  819. for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
  820. if ((!is_remote &&
  821. !devlink_reload_action_is_supported(devlink, i)) ||
  822. i == DEVLINK_RELOAD_ACTION_UNSPEC)
  823. continue;
  824. act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
  825. if (!act_info)
  826. goto nla_put_failure;
  827. if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
  828. goto action_info_nest_cancel;
  829. act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
  830. if (!act_stats)
  831. goto action_info_nest_cancel;
  832. for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
  833. /* Remote stats are shown even if not locally supported.
834. * Stats of actions with an unspecified limit are shown
835. * even though drivers do not need to register the
836. * unspecified limit.
  837. */
  838. if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
  839. !devlink_reload_limit_is_supported(devlink, j)) ||
  840. devlink_reload_combination_is_invalid(i, j))
  841. continue;
  842. stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
  843. if (!is_remote)
  844. value = devlink->stats.reload_stats[stat_idx];
  845. else
  846. value = devlink->stats.remote_reload_stats[stat_idx];
  847. if (devlink_reload_stat_put(msg, j, value))
  848. goto action_stats_nest_cancel;
  849. }
  850. nla_nest_end(msg, act_stats);
  851. nla_nest_end(msg, act_info);
  852. }
  853. nla_nest_end(msg, reload_stats_attr);
  854. return 0;
  855. action_stats_nest_cancel:
  856. nla_nest_cancel(msg, act_stats);
  857. action_info_nest_cancel:
  858. nla_nest_cancel(msg, act_info);
  859. nla_put_failure:
  860. nla_nest_cancel(msg, reload_stats_attr);
  861. return -EMSGSIZE;
  862. }
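/*
 * Illustrative sketch (not part of the original file): the reload stats are
 * kept in a flat array indexed limit-major, action-minor, exactly as the
 * stat_idx computation above does.
 */
static u32 devlink_reload_stat_idx_sketch(enum devlink_reload_limit limit,
                                          enum devlink_reload_action action)
{
        /* stats[limit][action] flattened into one dimension */
        return limit * __DEVLINK_RELOAD_ACTION_MAX + action;
}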
  863. static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
  864. enum devlink_command cmd, u32 portid,
  865. u32 seq, int flags)
  866. {
  867. struct nlattr *dev_stats;
  868. void *hdr;
  869. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  870. if (!hdr)
  871. return -EMSGSIZE;
  872. if (devlink_nl_put_handle(msg, devlink))
  873. goto nla_put_failure;
  874. if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
  875. goto nla_put_failure;
  876. dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
  877. if (!dev_stats)
  878. goto nla_put_failure;
  879. if (devlink_reload_stats_put(msg, devlink, false))
  880. goto dev_stats_nest_cancel;
  881. if (devlink_reload_stats_put(msg, devlink, true))
  882. goto dev_stats_nest_cancel;
  883. nla_nest_end(msg, dev_stats);
  884. genlmsg_end(msg, hdr);
  885. return 0;
  886. dev_stats_nest_cancel:
  887. nla_nest_cancel(msg, dev_stats);
  888. nla_put_failure:
  889. genlmsg_cancel(msg, hdr);
  890. return -EMSGSIZE;
  891. }
  892. static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
  893. {
  894. struct sk_buff *msg;
  895. int err;
  896. WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
  897. WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
  898. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  899. if (!msg)
  900. return;
  901. err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
  902. if (err) {
  903. nlmsg_free(msg);
  904. return;
  905. }
  906. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
  907. msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  908. }
  909. static int devlink_nl_port_attrs_put(struct sk_buff *msg,
  910. struct devlink_port *devlink_port)
  911. {
  912. struct devlink_port_attrs *attrs = &devlink_port->attrs;
  913. if (!devlink_port->attrs_set)
  914. return 0;
  915. if (attrs->lanes) {
  916. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes))
  917. return -EMSGSIZE;
  918. }
  919. if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable))
  920. return -EMSGSIZE;
  921. if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour))
  922. return -EMSGSIZE;
  923. switch (devlink_port->attrs.flavour) {
  924. case DEVLINK_PORT_FLAVOUR_PCI_PF:
  925. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
  926. attrs->pci_pf.controller) ||
  927. nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_pf.pf))
  928. return -EMSGSIZE;
  929. if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_pf.external))
  930. return -EMSGSIZE;
  931. break;
  932. case DEVLINK_PORT_FLAVOUR_PCI_VF:
  933. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
  934. attrs->pci_vf.controller) ||
  935. nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_vf.pf) ||
  936. nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER, attrs->pci_vf.vf))
  937. return -EMSGSIZE;
  938. if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_vf.external))
  939. return -EMSGSIZE;
  940. break;
  941. case DEVLINK_PORT_FLAVOUR_PCI_SF:
  942. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
  943. attrs->pci_sf.controller) ||
  944. nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER,
  945. attrs->pci_sf.pf) ||
  946. nla_put_u32(msg, DEVLINK_ATTR_PORT_PCI_SF_NUMBER,
  947. attrs->pci_sf.sf))
  948. return -EMSGSIZE;
  949. break;
  950. case DEVLINK_PORT_FLAVOUR_PHYSICAL:
  951. case DEVLINK_PORT_FLAVOUR_CPU:
  952. case DEVLINK_PORT_FLAVOUR_DSA:
  953. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
  954. attrs->phys.port_number))
  955. return -EMSGSIZE;
  956. if (!attrs->split)
  957. return 0;
  958. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP,
  959. attrs->phys.port_number))
  960. return -EMSGSIZE;
  961. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER,
  962. attrs->phys.split_subport_number))
  963. return -EMSGSIZE;
  964. break;
  965. default:
  966. break;
  967. }
  968. return 0;
  969. }
  970. static int devlink_port_fn_hw_addr_fill(const struct devlink_ops *ops,
  971. struct devlink_port *port,
  972. struct sk_buff *msg,
  973. struct netlink_ext_ack *extack,
  974. bool *msg_updated)
  975. {
  976. u8 hw_addr[MAX_ADDR_LEN];
  977. int hw_addr_len;
  978. int err;
  979. if (!ops->port_function_hw_addr_get)
  980. return 0;
  981. err = ops->port_function_hw_addr_get(port, hw_addr, &hw_addr_len,
  982. extack);
  983. if (err) {
  984. if (err == -EOPNOTSUPP)
  985. return 0;
  986. return err;
  987. }
  988. err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr);
  989. if (err)
  990. return err;
  991. *msg_updated = true;
  992. return 0;
  993. }
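/*
 * Illustrative sketch (not part of the original file): the driver-side shape
 * of the ->port_function_hw_addr_get() callback consumed above, inferred from
 * the call site. "my_drv_port_hw_addr_get_sketch" and the constant address it
 * reports are hypothetical.
 */
static int my_drv_port_hw_addr_get_sketch(struct devlink_port *port,
                                          u8 *hw_addr, int *hw_addr_len,
                                          struct netlink_ext_ack *extack)
{
        static const u8 example_addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };

        /* hw_addr points at a MAX_ADDR_LEN buffer owned by the caller */
        memcpy(hw_addr, example_addr, ETH_ALEN);
        *hw_addr_len = ETH_ALEN;
        return 0;
}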
  994. static int devlink_nl_rate_fill(struct sk_buff *msg,
  995. struct devlink_rate *devlink_rate,
  996. enum devlink_command cmd, u32 portid, u32 seq,
  997. int flags, struct netlink_ext_ack *extack)
  998. {
  999. struct devlink *devlink = devlink_rate->devlink;
  1000. void *hdr;
  1001. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  1002. if (!hdr)
  1003. return -EMSGSIZE;
  1004. if (devlink_nl_put_handle(msg, devlink))
  1005. goto nla_put_failure;
  1006. if (nla_put_u16(msg, DEVLINK_ATTR_RATE_TYPE, devlink_rate->type))
  1007. goto nla_put_failure;
  1008. if (devlink_rate_is_leaf(devlink_rate)) {
  1009. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
  1010. devlink_rate->devlink_port->index))
  1011. goto nla_put_failure;
  1012. } else if (devlink_rate_is_node(devlink_rate)) {
  1013. if (nla_put_string(msg, DEVLINK_ATTR_RATE_NODE_NAME,
  1014. devlink_rate->name))
  1015. goto nla_put_failure;
  1016. }
  1017. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_SHARE,
  1018. devlink_rate->tx_share, DEVLINK_ATTR_PAD))
  1019. goto nla_put_failure;
  1020. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_MAX,
  1021. devlink_rate->tx_max, DEVLINK_ATTR_PAD))
  1022. goto nla_put_failure;
  1023. if (devlink_rate->parent)
  1024. if (nla_put_string(msg, DEVLINK_ATTR_RATE_PARENT_NODE_NAME,
  1025. devlink_rate->parent->name))
  1026. goto nla_put_failure;
  1027. genlmsg_end(msg, hdr);
  1028. return 0;
  1029. nla_put_failure:
  1030. genlmsg_cancel(msg, hdr);
  1031. return -EMSGSIZE;
  1032. }
  1033. static bool
  1034. devlink_port_fn_state_valid(enum devlink_port_fn_state state)
  1035. {
  1036. return state == DEVLINK_PORT_FN_STATE_INACTIVE ||
  1037. state == DEVLINK_PORT_FN_STATE_ACTIVE;
  1038. }
  1039. static bool
  1040. devlink_port_fn_opstate_valid(enum devlink_port_fn_opstate opstate)
  1041. {
  1042. return opstate == DEVLINK_PORT_FN_OPSTATE_DETACHED ||
  1043. opstate == DEVLINK_PORT_FN_OPSTATE_ATTACHED;
  1044. }
  1045. static int devlink_port_fn_state_fill(const struct devlink_ops *ops,
  1046. struct devlink_port *port,
  1047. struct sk_buff *msg,
  1048. struct netlink_ext_ack *extack,
  1049. bool *msg_updated)
  1050. {
  1051. enum devlink_port_fn_opstate opstate;
  1052. enum devlink_port_fn_state state;
  1053. int err;
  1054. if (!ops->port_fn_state_get)
  1055. return 0;
  1056. err = ops->port_fn_state_get(port, &state, &opstate, extack);
  1057. if (err) {
  1058. if (err == -EOPNOTSUPP)
  1059. return 0;
  1060. return err;
  1061. }
  1062. if (!devlink_port_fn_state_valid(state)) {
  1063. WARN_ON_ONCE(1);
  1064. NL_SET_ERR_MSG_MOD(extack, "Invalid state read from driver");
  1065. return -EINVAL;
  1066. }
  1067. if (!devlink_port_fn_opstate_valid(opstate)) {
  1068. WARN_ON_ONCE(1);
  1069. NL_SET_ERR_MSG_MOD(extack,
  1070. "Invalid operational state read from driver");
  1071. return -EINVAL;
  1072. }
  1073. if (nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_STATE, state) ||
  1074. nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_OPSTATE, opstate))
  1075. return -EMSGSIZE;
  1076. *msg_updated = true;
  1077. return 0;
  1078. }
  1079. static int
  1080. devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port,
  1081. struct netlink_ext_ack *extack)
  1082. {
  1083. const struct devlink_ops *ops;
  1084. struct nlattr *function_attr;
  1085. bool msg_updated = false;
  1086. int err;
  1087. function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION);
  1088. if (!function_attr)
  1089. return -EMSGSIZE;
  1090. ops = port->devlink->ops;
  1091. err = devlink_port_fn_hw_addr_fill(ops, port, msg, extack,
  1092. &msg_updated);
  1093. if (err)
  1094. goto out;
  1095. err = devlink_port_fn_state_fill(ops, port, msg, extack, &msg_updated);
  1096. out:
  1097. if (err || !msg_updated)
  1098. nla_nest_cancel(msg, function_attr);
  1099. else
  1100. nla_nest_end(msg, function_attr);
  1101. return err;
  1102. }
  1103. static int devlink_nl_port_fill(struct sk_buff *msg,
  1104. struct devlink_port *devlink_port,
  1105. enum devlink_command cmd, u32 portid, u32 seq,
  1106. int flags, struct netlink_ext_ack *extack)
  1107. {
  1108. struct devlink *devlink = devlink_port->devlink;
  1109. void *hdr;
  1110. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  1111. if (!hdr)
  1112. return -EMSGSIZE;
  1113. if (devlink_nl_put_handle(msg, devlink))
  1114. goto nla_put_failure;
  1115. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
  1116. goto nla_put_failure;
  1117. /* Hold rtnl lock while accessing port's netdev attributes. */
  1118. rtnl_lock();
  1119. spin_lock_bh(&devlink_port->type_lock);
  1120. if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
  1121. goto nla_put_failure_type_locked;
  1122. if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET &&
  1123. nla_put_u16(msg, DEVLINK_ATTR_PORT_DESIRED_TYPE,
  1124. devlink_port->desired_type))
  1125. goto nla_put_failure_type_locked;
  1126. if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
  1127. struct net *net = devlink_net(devlink_port->devlink);
  1128. struct net_device *netdev = devlink_port->type_dev;
  1129. if (netdev && net_eq(net, dev_net(netdev)) &&
  1130. (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
  1131. netdev->ifindex) ||
  1132. nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
  1133. netdev->name)))
  1134. goto nla_put_failure_type_locked;
  1135. }
  1136. if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
  1137. struct ib_device *ibdev = devlink_port->type_dev;
  1138. if (ibdev &&
  1139. nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
  1140. ibdev->name))
  1141. goto nla_put_failure_type_locked;
  1142. }
  1143. spin_unlock_bh(&devlink_port->type_lock);
  1144. rtnl_unlock();
  1145. if (devlink_nl_port_attrs_put(msg, devlink_port))
  1146. goto nla_put_failure;
  1147. if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
  1148. goto nla_put_failure;
  1149. if (devlink_port->linecard &&
  1150. nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX,
  1151. devlink_port->linecard->index))
  1152. goto nla_put_failure;
  1153. genlmsg_end(msg, hdr);
  1154. return 0;
  1155. nla_put_failure_type_locked:
  1156. spin_unlock_bh(&devlink_port->type_lock);
  1157. rtnl_unlock();
  1158. nla_put_failure:
  1159. genlmsg_cancel(msg, hdr);
  1160. return -EMSGSIZE;
  1161. }
  1162. static void devlink_port_notify(struct devlink_port *devlink_port,
  1163. enum devlink_command cmd)
  1164. {
  1165. struct devlink *devlink = devlink_port->devlink;
  1166. struct sk_buff *msg;
  1167. int err;
  1168. WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL);
  1169. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  1170. return;
  1171. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1172. if (!msg)
  1173. return;
  1174. err = devlink_nl_port_fill(msg, devlink_port, cmd, 0, 0, 0, NULL);
  1175. if (err) {
  1176. nlmsg_free(msg);
  1177. return;
  1178. }
  1179. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
  1180. 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  1181. }
  1182. static void devlink_rate_notify(struct devlink_rate *devlink_rate,
  1183. enum devlink_command cmd)
  1184. {
  1185. struct devlink *devlink = devlink_rate->devlink;
  1186. struct sk_buff *msg;
  1187. int err;
  1188. WARN_ON(cmd != DEVLINK_CMD_RATE_NEW && cmd != DEVLINK_CMD_RATE_DEL);
  1189. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  1190. return;
  1191. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1192. if (!msg)
  1193. return;
  1194. err = devlink_nl_rate_fill(msg, devlink_rate, cmd, 0, 0, 0, NULL);
  1195. if (err) {
  1196. nlmsg_free(msg);
  1197. return;
  1198. }
  1199. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
  1200. 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  1201. }
  1202. static int devlink_nl_cmd_rate_get_dumpit(struct sk_buff *msg,
  1203. struct netlink_callback *cb)
  1204. {
  1205. struct devlink_rate *devlink_rate;
  1206. struct devlink *devlink;
  1207. int start = cb->args[0];
  1208. unsigned long index;
  1209. int idx = 0;
  1210. int err = 0;
  1211. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  1212. devl_lock(devlink);
  1213. list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
  1214. enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
  1215. u32 id = NETLINK_CB(cb->skb).portid;
  1216. if (idx < start) {
  1217. idx++;
  1218. continue;
  1219. }
  1220. err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
  1221. cb->nlh->nlmsg_seq,
  1222. NLM_F_MULTI, NULL);
  1223. if (err) {
  1224. devl_unlock(devlink);
  1225. devlink_put(devlink);
  1226. goto out;
  1227. }
  1228. idx++;
  1229. }
  1230. devl_unlock(devlink);
  1231. devlink_put(devlink);
  1232. }
  1233. out:
  1234. if (err != -EMSGSIZE)
  1235. return err;
  1236. cb->args[0] = idx;
  1237. return msg->len;
  1238. }
  1239. static int devlink_nl_cmd_rate_get_doit(struct sk_buff *skb,
  1240. struct genl_info *info)
  1241. {
  1242. struct devlink_rate *devlink_rate = info->user_ptr[1];
  1243. struct sk_buff *msg;
  1244. int err;
  1245. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1246. if (!msg)
  1247. return -ENOMEM;
  1248. err = devlink_nl_rate_fill(msg, devlink_rate, DEVLINK_CMD_RATE_NEW,
  1249. info->snd_portid, info->snd_seq, 0,
  1250. info->extack);
  1251. if (err) {
  1252. nlmsg_free(msg);
  1253. return err;
  1254. }
  1255. return genlmsg_reply(msg, info);
  1256. }
  1257. static bool
  1258. devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
  1259. struct devlink_rate *parent)
  1260. {
  1261. while (parent) {
  1262. if (parent == devlink_rate)
  1263. return true;
  1264. parent = parent->parent;
  1265. }
  1266. return false;
  1267. }
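/*
 * Illustrative sketch (not part of the original file): the walk above climbs
 * the parent chain so that reparenting cannot create a cycle. The hierarchy
 * built here is hypothetical.
 */
static void devlink_rate_cycle_check_sketch(void)
{
        struct devlink_rate a = {}, b = {}, c = {};

        /* hypothetical hierarchy: c -> b -> a (a is the root) */
        b.parent = &a;
        c.parent = &b;

        /* making "a" a child of "c" would close a loop, so this returns true */
        WARN_ON(!devlink_rate_is_parent_node(&a, &c));
}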
  1268. static int devlink_nl_cmd_get_doit(struct sk_buff *skb, struct genl_info *info)
  1269. {
  1270. struct devlink *devlink = info->user_ptr[0];
  1271. struct sk_buff *msg;
  1272. int err;
  1273. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1274. if (!msg)
  1275. return -ENOMEM;
  1276. err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
  1277. info->snd_portid, info->snd_seq, 0);
  1278. if (err) {
  1279. nlmsg_free(msg);
  1280. return err;
  1281. }
  1282. return genlmsg_reply(msg, info);
  1283. }
  1284. static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg,
  1285. struct netlink_callback *cb)
  1286. {
  1287. struct devlink *devlink;
  1288. int start = cb->args[0];
  1289. unsigned long index;
  1290. int idx = 0;
  1291. int err;
  1292. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  1293. if (idx < start) {
  1294. idx++;
  1295. devlink_put(devlink);
  1296. continue;
  1297. }
  1298. devl_lock(devlink);
  1299. err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
  1300. NETLINK_CB(cb->skb).portid,
  1301. cb->nlh->nlmsg_seq, NLM_F_MULTI);
  1302. devl_unlock(devlink);
  1303. devlink_put(devlink);
  1304. if (err)
  1305. goto out;
  1306. idx++;
  1307. }
  1308. out:
  1309. cb->args[0] = idx;
  1310. return msg->len;
  1311. }
  1312. static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
  1313. struct genl_info *info)
  1314. {
  1315. struct devlink_port *devlink_port = info->user_ptr[1];
  1316. struct sk_buff *msg;
  1317. int err;
  1318. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1319. if (!msg)
  1320. return -ENOMEM;
  1321. err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
  1322. info->snd_portid, info->snd_seq, 0,
  1323. info->extack);
  1324. if (err) {
  1325. nlmsg_free(msg);
  1326. return err;
  1327. }
  1328. return genlmsg_reply(msg, info);
  1329. }
  1330. static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
  1331. struct netlink_callback *cb)
  1332. {
  1333. struct devlink *devlink;
  1334. struct devlink_port *devlink_port;
  1335. int start = cb->args[0];
  1336. unsigned long index;
  1337. int idx = 0;
  1338. int err;
  1339. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  1340. devl_lock(devlink);
  1341. list_for_each_entry(devlink_port, &devlink->port_list, list) {
  1342. if (idx < start) {
  1343. idx++;
  1344. continue;
  1345. }
  1346. err = devlink_nl_port_fill(msg, devlink_port,
  1347. DEVLINK_CMD_NEW,
  1348. NETLINK_CB(cb->skb).portid,
  1349. cb->nlh->nlmsg_seq,
  1350. NLM_F_MULTI, cb->extack);
  1351. if (err) {
  1352. devl_unlock(devlink);
  1353. devlink_put(devlink);
  1354. goto out;
  1355. }
  1356. idx++;
  1357. }
  1358. devl_unlock(devlink);
  1359. devlink_put(devlink);
  1360. }
  1361. out:
  1362. cb->args[0] = idx;
  1363. return msg->len;
  1364. }
  1365. static int devlink_port_type_set(struct devlink_port *devlink_port,
  1366. enum devlink_port_type port_type)
  1367. {
  1368. int err;
  1369. if (!devlink_port->devlink->ops->port_type_set)
  1370. return -EOPNOTSUPP;
  1371. if (port_type == devlink_port->type)
  1372. return 0;
  1373. err = devlink_port->devlink->ops->port_type_set(devlink_port,
  1374. port_type);
  1375. if (err)
  1376. return err;
  1377. devlink_port->desired_type = port_type;
  1378. devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
  1379. return 0;
  1380. }
  1381. static int devlink_port_function_hw_addr_set(struct devlink_port *port,
  1382. const struct nlattr *attr,
  1383. struct netlink_ext_ack *extack)
  1384. {
  1385. const struct devlink_ops *ops = port->devlink->ops;
  1386. const u8 *hw_addr;
  1387. int hw_addr_len;
  1388. hw_addr = nla_data(attr);
  1389. hw_addr_len = nla_len(attr);
  1390. if (hw_addr_len > MAX_ADDR_LEN) {
  1391. NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long");
  1392. return -EINVAL;
  1393. }
  1394. if (port->type == DEVLINK_PORT_TYPE_ETH) {
  1395. if (hw_addr_len != ETH_ALEN) {
  1396. NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device");
  1397. return -EINVAL;
  1398. }
  1399. if (!is_unicast_ether_addr(hw_addr)) {
  1400. NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported");
  1401. return -EINVAL;
  1402. }
  1403. }
  1404. if (!ops->port_function_hw_addr_set) {
  1405. NL_SET_ERR_MSG_MOD(extack, "Port doesn't support function attributes");
  1406. return -EOPNOTSUPP;
  1407. }
  1408. return ops->port_function_hw_addr_set(port, hw_addr, hw_addr_len,
  1409. extack);
  1410. }
  1411. static int devlink_port_fn_state_set(struct devlink_port *port,
  1412. const struct nlattr *attr,
  1413. struct netlink_ext_ack *extack)
  1414. {
  1415. enum devlink_port_fn_state state;
  1416. const struct devlink_ops *ops;
  1417. state = nla_get_u8(attr);
  1418. ops = port->devlink->ops;
  1419. if (!ops->port_fn_state_set) {
  1420. NL_SET_ERR_MSG_MOD(extack,
  1421. "Function does not support state setting");
  1422. return -EOPNOTSUPP;
  1423. }
  1424. return ops->port_fn_state_set(port, state, extack);
  1425. }
  1426. static int devlink_port_function_set(struct devlink_port *port,
  1427. const struct nlattr *attr,
  1428. struct netlink_ext_ack *extack)
  1429. {
  1430. struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1];
  1431. int err;
  1432. err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr,
  1433. devlink_function_nl_policy, extack);
  1434. if (err < 0) {
  1435. NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes");
  1436. return err;
  1437. }
  1438. attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR];
  1439. if (attr) {
  1440. err = devlink_port_function_hw_addr_set(port, attr, extack);
  1441. if (err)
  1442. return err;
  1443. }
  1444. /* Keep this as the last function attribute set, so that when
  1445. * multiple port function attributes are set along with state,
1446. * those can be applied first, before activating the state.
  1447. */
  1448. attr = tb[DEVLINK_PORT_FN_ATTR_STATE];
  1449. if (attr)
  1450. err = devlink_port_fn_state_set(port, attr, extack);
  1451. if (!err)
  1452. devlink_port_notify(port, DEVLINK_CMD_PORT_NEW);
  1453. return err;
  1454. }
  1455. static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
  1456. struct genl_info *info)
  1457. {
  1458. struct devlink_port *devlink_port = info->user_ptr[1];
  1459. int err;
  1460. if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
  1461. enum devlink_port_type port_type;
  1462. port_type = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_TYPE]);
  1463. err = devlink_port_type_set(devlink_port, port_type);
  1464. if (err)
  1465. return err;
  1466. }
  1467. if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) {
  1468. struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION];
  1469. struct netlink_ext_ack *extack = info->extack;
  1470. err = devlink_port_function_set(devlink_port, attr, extack);
  1471. if (err)
  1472. return err;
  1473. }
  1474. return 0;
  1475. }
  1476. static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
  1477. struct genl_info *info)
  1478. {
  1479. struct devlink_port *devlink_port = info->user_ptr[1];
  1480. struct devlink *devlink = info->user_ptr[0];
  1481. u32 count;
  1482. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_SPLIT_COUNT))
  1483. return -EINVAL;
  1484. if (!devlink->ops->port_split)
  1485. return -EOPNOTSUPP;
  1486. count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
  1487. if (!devlink_port->attrs.splittable) {
  1488. /* Split ports cannot be split. */
  1489. if (devlink_port->attrs.split)
  1490. NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split further");
  1491. else
  1492. NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split");
  1493. return -EINVAL;
  1494. }
  1495. if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) {
  1496. NL_SET_ERR_MSG_MOD(info->extack, "Invalid split count");
  1497. return -EINVAL;
  1498. }
  1499. return devlink->ops->port_split(devlink, devlink_port, count,
  1500. info->extack);
  1501. }
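/*
 * Illustrative sketch (not part of the original file): the split-count rule
 * enforced above. For a hypothetical splittable port with 4 lanes, counts of
 * 2 and 4 pass, while 0, 1, 3 and 8 are rejected.
 */
static bool devlink_port_split_count_ok_sketch(struct devlink_port *port,
                                               u32 count)
{
        return count >= 2 && is_power_of_2(count) &&
               count <= port->attrs.lanes;
}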
  1502. static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
  1503. struct genl_info *info)
  1504. {
  1505. struct devlink_port *devlink_port = info->user_ptr[1];
  1506. struct devlink *devlink = info->user_ptr[0];
  1507. if (!devlink->ops->port_unsplit)
  1508. return -EOPNOTSUPP;
  1509. return devlink->ops->port_unsplit(devlink, devlink_port, info->extack);
  1510. }
  1511. static int devlink_port_new_notify(struct devlink *devlink,
  1512. unsigned int port_index,
  1513. struct genl_info *info)
  1514. {
  1515. struct devlink_port *devlink_port;
  1516. struct sk_buff *msg;
  1517. int err;
  1518. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1519. if (!msg)
  1520. return -ENOMEM;
  1521. lockdep_assert_held(&devlink->lock);
  1522. devlink_port = devlink_port_get_by_index(devlink, port_index);
  1523. if (!devlink_port) {
  1524. err = -ENODEV;
  1525. goto out;
  1526. }
  1527. err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
  1528. info->snd_portid, info->snd_seq, 0, NULL);
  1529. if (err)
  1530. goto out;
  1531. return genlmsg_reply(msg, info);
  1532. out:
  1533. nlmsg_free(msg);
  1534. return err;
  1535. }
  1536. static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb,
  1537. struct genl_info *info)
  1538. {
  1539. struct netlink_ext_ack *extack = info->extack;
  1540. struct devlink_port_new_attrs new_attrs = {};
  1541. struct devlink *devlink = info->user_ptr[0];
  1542. unsigned int new_port_index;
  1543. int err;
  1544. if (!devlink->ops->port_new || !devlink->ops->port_del)
  1545. return -EOPNOTSUPP;
  1546. if (!info->attrs[DEVLINK_ATTR_PORT_FLAVOUR] ||
  1547. !info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]) {
  1548. NL_SET_ERR_MSG_MOD(extack, "Port flavour or PCI PF are not specified");
  1549. return -EINVAL;
  1550. }
  1551. new_attrs.flavour = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_FLAVOUR]);
  1552. new_attrs.pfnum =
  1553. nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]);
  1554. if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
1555. /* Port index of the new port being created by the driver. */
  1556. new_attrs.port_index =
  1557. nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
  1558. new_attrs.port_index_valid = true;
  1559. }
  1560. if (info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]) {
  1561. new_attrs.controller =
  1562. nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]);
  1563. new_attrs.controller_valid = true;
  1564. }
  1565. if (new_attrs.flavour == DEVLINK_PORT_FLAVOUR_PCI_SF &&
  1566. info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]) {
  1567. new_attrs.sfnum = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]);
  1568. new_attrs.sfnum_valid = true;
  1569. }
  1570. err = devlink->ops->port_new(devlink, &new_attrs, extack,
  1571. &new_port_index);
  1572. if (err)
  1573. return err;
  1574. err = devlink_port_new_notify(devlink, new_port_index, info);
  1575. if (err && err != -ENODEV) {
1576. /* Failed to send the response; destroy the newly created port. */
  1577. devlink->ops->port_del(devlink, new_port_index, extack);
  1578. }
  1579. return err;
  1580. }
  1581. static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb,
  1582. struct genl_info *info)
  1583. {
  1584. struct netlink_ext_ack *extack = info->extack;
  1585. struct devlink *devlink = info->user_ptr[0];
  1586. unsigned int port_index;
  1587. if (!devlink->ops->port_del)
  1588. return -EOPNOTSUPP;
  1589. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_INDEX)) {
  1590. NL_SET_ERR_MSG_MOD(extack, "Port index is not specified");
  1591. return -EINVAL;
  1592. }
  1593. port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
  1594. return devlink->ops->port_del(devlink, port_index, extack);
  1595. }
  1596. static int
  1597. devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
  1598. struct genl_info *info,
  1599. struct nlattr *nla_parent)
  1600. {
  1601. struct devlink *devlink = devlink_rate->devlink;
  1602. const char *parent_name = nla_data(nla_parent);
  1603. const struct devlink_ops *ops = devlink->ops;
  1604. size_t len = strlen(parent_name);
  1605. struct devlink_rate *parent;
  1606. int err = -EOPNOTSUPP;
  1607. parent = devlink_rate->parent;
  1608. if (parent && len) {
  1609. NL_SET_ERR_MSG_MOD(info->extack, "Rate object already has parent.");
  1610. return -EBUSY;
  1611. } else if (parent && !len) {
  1612. if (devlink_rate_is_leaf(devlink_rate))
  1613. err = ops->rate_leaf_parent_set(devlink_rate, NULL,
  1614. devlink_rate->priv, NULL,
  1615. info->extack);
  1616. else if (devlink_rate_is_node(devlink_rate))
  1617. err = ops->rate_node_parent_set(devlink_rate, NULL,
  1618. devlink_rate->priv, NULL,
  1619. info->extack);
  1620. if (err)
  1621. return err;
  1622. refcount_dec(&parent->refcnt);
  1623. devlink_rate->parent = NULL;
  1624. } else if (!parent && len) {
  1625. parent = devlink_rate_node_get_by_name(devlink, parent_name);
  1626. if (IS_ERR(parent))
  1627. return -ENODEV;
  1628. if (parent == devlink_rate) {
  1629. NL_SET_ERR_MSG_MOD(info->extack, "Parent to self is not allowed");
  1630. return -EINVAL;
  1631. }
  1632. if (devlink_rate_is_node(devlink_rate) &&
  1633. devlink_rate_is_parent_node(devlink_rate, parent->parent)) {
  1634. NL_SET_ERR_MSG_MOD(info->extack, "Node is already a parent of parent node.");
  1635. return -EEXIST;
  1636. }
  1637. if (devlink_rate_is_leaf(devlink_rate))
  1638. err = ops->rate_leaf_parent_set(devlink_rate, parent,
  1639. devlink_rate->priv, parent->priv,
  1640. info->extack);
  1641. else if (devlink_rate_is_node(devlink_rate))
  1642. err = ops->rate_node_parent_set(devlink_rate, parent,
  1643. devlink_rate->priv, parent->priv,
  1644. info->extack);
  1645. if (err)
  1646. return err;
  1647. refcount_inc(&parent->refcnt);
  1648. devlink_rate->parent = parent;
  1649. }
  1650. return 0;
  1651. }
  1652. static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
  1653. const struct devlink_ops *ops,
  1654. struct genl_info *info)
  1655. {
  1656. struct nlattr *nla_parent, **attrs = info->attrs;
  1657. int err = -EOPNOTSUPP;
  1658. u64 rate;
  1659. if (attrs[DEVLINK_ATTR_RATE_TX_SHARE]) {
  1660. rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_SHARE]);
  1661. if (devlink_rate_is_leaf(devlink_rate))
  1662. err = ops->rate_leaf_tx_share_set(devlink_rate, devlink_rate->priv,
  1663. rate, info->extack);
  1664. else if (devlink_rate_is_node(devlink_rate))
  1665. err = ops->rate_node_tx_share_set(devlink_rate, devlink_rate->priv,
  1666. rate, info->extack);
  1667. if (err)
  1668. return err;
  1669. devlink_rate->tx_share = rate;
  1670. }
  1671. if (attrs[DEVLINK_ATTR_RATE_TX_MAX]) {
  1672. rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_MAX]);
  1673. if (devlink_rate_is_leaf(devlink_rate))
  1674. err = ops->rate_leaf_tx_max_set(devlink_rate, devlink_rate->priv,
  1675. rate, info->extack);
  1676. else if (devlink_rate_is_node(devlink_rate))
  1677. err = ops->rate_node_tx_max_set(devlink_rate, devlink_rate->priv,
  1678. rate, info->extack);
  1679. if (err)
  1680. return err;
  1681. devlink_rate->tx_max = rate;
  1682. }
  1683. nla_parent = attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME];
  1684. if (nla_parent) {
  1685. err = devlink_nl_rate_parent_node_set(devlink_rate, info,
  1686. nla_parent);
  1687. if (err)
  1688. return err;
  1689. }
  1690. return 0;
  1691. }
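/* Verify that the driver implements every callback required by the
 * attributes present in the request for the given rate object type.
 */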
  1692. static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
  1693. struct genl_info *info,
  1694. enum devlink_rate_type type)
  1695. {
  1696. struct nlattr **attrs = info->attrs;
  1697. if (type == DEVLINK_RATE_TYPE_LEAF) {
  1698. if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_leaf_tx_share_set) {
  1699. NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the leafs");
  1700. return false;
  1701. }
  1702. if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_leaf_tx_max_set) {
  1703. NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the leafs");
  1704. return false;
  1705. }
  1706. if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
  1707. !ops->rate_leaf_parent_set) {
  1708. NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the leafs");
  1709. return false;
  1710. }
  1711. } else if (type == DEVLINK_RATE_TYPE_NODE) {
  1712. if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
  1713. NL_SET_ERR_MSG_MOD(info->extack, "TX share set isn't supported for the nodes");
  1714. return false;
  1715. }
  1716. if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_node_tx_max_set) {
  1717. NL_SET_ERR_MSG_MOD(info->extack, "TX max set isn't supported for the nodes");
  1718. return false;
  1719. }
  1720. if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
  1721. !ops->rate_node_parent_set) {
  1722. NL_SET_ERR_MSG_MOD(info->extack, "Parent set isn't supported for the nodes");
  1723. return false;
  1724. }
  1725. } else {
  1726. WARN(1, "Unknown type of rate object");
  1727. return false;
  1728. }
  1729. return true;
  1730. }
  1731. static int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb,
  1732. struct genl_info *info)
  1733. {
  1734. struct devlink_rate *devlink_rate = info->user_ptr[1];
  1735. struct devlink *devlink = devlink_rate->devlink;
  1736. const struct devlink_ops *ops = devlink->ops;
  1737. int err;
  1738. if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type))
  1739. return -EOPNOTSUPP;
  1740. err = devlink_nl_rate_set(devlink_rate, ops, info);
  1741. if (!err)
  1742. devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
  1743. return err;
  1744. }
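/* Create a new rate node named by DEVLINK_ATTR_RATE_NODE_NAME, apply any
 * rate attributes carried in the same request and announce it via a
 * DEVLINK_CMD_RATE_NEW notification.
 */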
  1745. static int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb,
  1746. struct genl_info *info)
  1747. {
  1748. struct devlink *devlink = info->user_ptr[0];
  1749. struct devlink_rate *rate_node;
  1750. const struct devlink_ops *ops;
  1751. int err;
  1752. ops = devlink->ops;
  1753. if (!ops || !ops->rate_node_new || !ops->rate_node_del) {
  1754. NL_SET_ERR_MSG_MOD(info->extack, "Rate nodes aren't supported");
  1755. return -EOPNOTSUPP;
  1756. }
  1757. if (!devlink_rate_set_ops_supported(ops, info, DEVLINK_RATE_TYPE_NODE))
  1758. return -EOPNOTSUPP;
  1759. rate_node = devlink_rate_node_get_from_attrs(devlink, info->attrs);
  1760. if (!IS_ERR(rate_node))
  1761. return -EEXIST;
  1762. else if (rate_node == ERR_PTR(-EINVAL))
  1763. return -EINVAL;
  1764. rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
  1765. if (!rate_node)
  1766. return -ENOMEM;
  1767. rate_node->devlink = devlink;
  1768. rate_node->type = DEVLINK_RATE_TYPE_NODE;
  1769. rate_node->name = nla_strdup(info->attrs[DEVLINK_ATTR_RATE_NODE_NAME], GFP_KERNEL);
  1770. if (!rate_node->name) {
  1771. err = -ENOMEM;
  1772. goto err_strdup;
  1773. }
  1774. err = ops->rate_node_new(rate_node, &rate_node->priv, info->extack);
  1775. if (err)
  1776. goto err_node_new;
  1777. err = devlink_nl_rate_set(rate_node, ops, info);
  1778. if (err)
  1779. goto err_rate_set;
  1780. refcount_set(&rate_node->refcnt, 1);
  1781. list_add(&rate_node->list, &devlink->rate_list);
  1782. devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
  1783. return 0;
  1784. err_rate_set:
  1785. ops->rate_node_del(rate_node, rate_node->priv, info->extack);
  1786. err_node_new:
  1787. kfree(rate_node->name);
  1788. err_strdup:
  1789. kfree(rate_node);
  1790. return err;
  1791. }
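/* A rate node may only be deleted once no child holds a reference to it,
 * i.e. its refcount is back to the node's own single reference.
 */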
  1792. static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
  1793. struct genl_info *info)
  1794. {
  1795. struct devlink_rate *rate_node = info->user_ptr[1];
  1796. struct devlink *devlink = rate_node->devlink;
  1797. const struct devlink_ops *ops = devlink->ops;
  1798. int err;
  1799. if (refcount_read(&rate_node->refcnt) > 1) {
  1800. NL_SET_ERR_MSG_MOD(info->extack, "Node has children. Cannot delete node.");
  1801. return -EBUSY;
  1802. }
  1803. devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
  1804. err = ops->rate_node_del(rate_node, rate_node->priv, info->extack);
  1805. if (rate_node->parent)
  1806. refcount_dec(&rate_node->parent->refcnt);
  1807. list_del(&rate_node->list);
  1808. kfree(rate_node->name);
  1809. kfree(rate_node);
  1810. return err;
  1811. }
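/* One supported line card type as registered by the driver: its name and
 * the driver private pointer passed back on provisioning.
 */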
  1812. struct devlink_linecard_type {
  1813. const char *type;
  1814. const void *priv;
  1815. };
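/* Fill one DEVLINK_CMD_LINECARD_* message: handle, index, state, the
 * currently provisioned type, the list of supported types and, when
 * present, the nested devlink instance of the line card.
 */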
  1816. static int devlink_nl_linecard_fill(struct sk_buff *msg,
  1817. struct devlink *devlink,
  1818. struct devlink_linecard *linecard,
  1819. enum devlink_command cmd, u32 portid,
  1820. u32 seq, int flags,
  1821. struct netlink_ext_ack *extack)
  1822. {
  1823. struct devlink_linecard_type *linecard_type;
  1824. struct nlattr *attr;
  1825. void *hdr;
  1826. int i;
  1827. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  1828. if (!hdr)
  1829. return -EMSGSIZE;
  1830. if (devlink_nl_put_handle(msg, devlink))
  1831. goto nla_put_failure;
  1832. if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index))
  1833. goto nla_put_failure;
  1834. if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state))
  1835. goto nla_put_failure;
  1836. if (linecard->type &&
  1837. nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type))
  1838. goto nla_put_failure;
  1839. if (linecard->types_count) {
  1840. attr = nla_nest_start(msg,
  1841. DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES);
  1842. if (!attr)
  1843. goto nla_put_failure;
  1844. for (i = 0; i < linecard->types_count; i++) {
  1845. linecard_type = &linecard->types[i];
  1846. if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE,
  1847. linecard_type->type)) {
  1848. nla_nest_cancel(msg, attr);
  1849. goto nla_put_failure;
  1850. }
  1851. }
  1852. nla_nest_end(msg, attr);
  1853. }
  1854. if (linecard->nested_devlink &&
  1855. devlink_nl_put_nested_handle(msg, linecard->nested_devlink))
  1856. goto nla_put_failure;
  1857. genlmsg_end(msg, hdr);
  1858. return 0;
  1859. nla_put_failure:
  1860. genlmsg_cancel(msg, hdr);
  1861. return -EMSGSIZE;
  1862. }
  1863. static void devlink_linecard_notify(struct devlink_linecard *linecard,
  1864. enum devlink_command cmd)
  1865. {
  1866. struct devlink *devlink = linecard->devlink;
  1867. struct sk_buff *msg;
  1868. int err;
  1869. WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW &&
  1870. cmd != DEVLINK_CMD_LINECARD_DEL);
  1871. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  1872. return;
  1873. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1874. if (!msg)
  1875. return;
  1876. err = devlink_nl_linecard_fill(msg, devlink, linecard, cmd, 0, 0, 0,
  1877. NULL);
  1878. if (err) {
  1879. nlmsg_free(msg);
  1880. return;
  1881. }
  1882. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
  1883. msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  1884. }
  1885. static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb,
  1886. struct genl_info *info)
  1887. {
  1888. struct devlink_linecard *linecard = info->user_ptr[1];
  1889. struct devlink *devlink = linecard->devlink;
  1890. struct sk_buff *msg;
  1891. int err;
  1892. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1893. if (!msg)
  1894. return -ENOMEM;
  1895. mutex_lock(&linecard->state_lock);
  1896. err = devlink_nl_linecard_fill(msg, devlink, linecard,
  1897. DEVLINK_CMD_LINECARD_NEW,
  1898. info->snd_portid, info->snd_seq, 0,
  1899. info->extack);
  1900. mutex_unlock(&linecard->state_lock);
  1901. if (err) {
  1902. nlmsg_free(msg);
  1903. return err;
  1904. }
  1905. return genlmsg_reply(msg, info);
  1906. }
  1907. static int devlink_nl_cmd_linecard_get_dumpit(struct sk_buff *msg,
  1908. struct netlink_callback *cb)
  1909. {
  1910. struct devlink_linecard *linecard;
  1911. struct devlink *devlink;
  1912. int start = cb->args[0];
  1913. unsigned long index;
  1914. int idx = 0;
  1915. int err;
  1916. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  1917. mutex_lock(&devlink->linecards_lock);
  1918. list_for_each_entry(linecard, &devlink->linecard_list, list) {
  1919. if (idx < start) {
  1920. idx++;
  1921. continue;
  1922. }
  1923. mutex_lock(&linecard->state_lock);
  1924. err = devlink_nl_linecard_fill(msg, devlink, linecard,
  1925. DEVLINK_CMD_LINECARD_NEW,
  1926. NETLINK_CB(cb->skb).portid,
  1927. cb->nlh->nlmsg_seq,
  1928. NLM_F_MULTI,
  1929. cb->extack);
  1930. mutex_unlock(&linecard->state_lock);
  1931. if (err) {
  1932. mutex_unlock(&devlink->linecards_lock);
  1933. devlink_put(devlink);
  1934. goto out;
  1935. }
  1936. idx++;
  1937. }
  1938. mutex_unlock(&devlink->linecards_lock);
  1939. devlink_put(devlink);
  1940. }
  1941. out:
  1942. cb->args[0] = idx;
  1943. return msg->len;
  1944. }
  1945. static struct devlink_linecard_type *
  1946. devlink_linecard_type_lookup(struct devlink_linecard *linecard,
  1947. const char *type)
  1948. {
  1949. struct devlink_linecard_type *linecard_type;
  1950. int i;
  1951. for (i = 0; i < linecard->types_count; i++) {
  1952. linecard_type = &linecard->types[i];
  1953. if (!strcmp(type, linecard_type->type))
  1954. return linecard_type;
  1955. }
  1956. return NULL;
  1957. }
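/* Provision a line card with the given type. The driver ->provision()
 * call runs outside state_lock; on failure the state is rolled back to
 * UNPROVISIONED and userspace is notified again.
 */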
  1958. static int devlink_linecard_type_set(struct devlink_linecard *linecard,
  1959. const char *type,
  1960. struct netlink_ext_ack *extack)
  1961. {
  1962. const struct devlink_linecard_ops *ops = linecard->ops;
  1963. struct devlink_linecard_type *linecard_type;
  1964. int err;
  1965. mutex_lock(&linecard->state_lock);
  1966. if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
  1967. NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
  1968. err = -EBUSY;
  1969. goto out;
  1970. }
  1971. if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
  1972. NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
  1973. err = -EBUSY;
  1974. goto out;
  1975. }
  1976. linecard_type = devlink_linecard_type_lookup(linecard, type);
  1977. if (!linecard_type) {
  1978. NL_SET_ERR_MSG_MOD(extack, "Unsupported line card type provided");
  1979. err = -EINVAL;
  1980. goto out;
  1981. }
  1982. if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED &&
  1983. linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
  1984. NL_SET_ERR_MSG_MOD(extack, "Line card already provisioned");
  1985. err = -EBUSY;
/* Check if the line card is already provisioned in the same
 * way the user asks. If it is, return success.
 */
  1990. if (ops->same_provision &&
  1991. ops->same_provision(linecard, linecard->priv,
  1992. linecard_type->type,
  1993. linecard_type->priv))
  1994. err = 0;
  1995. goto out;
  1996. }
  1997. linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING;
  1998. linecard->type = linecard_type->type;
  1999. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  2000. mutex_unlock(&linecard->state_lock);
  2001. err = ops->provision(linecard, linecard->priv, linecard_type->type,
  2002. linecard_type->priv, extack);
  2003. if (err) {
  2004. /* Provisioning failed. Assume the linecard is unprovisioned
  2005. * for future operations.
  2006. */
  2007. mutex_lock(&linecard->state_lock);
  2008. linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
  2009. linecard->type = NULL;
  2010. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  2011. mutex_unlock(&linecard->state_lock);
  2012. }
  2013. return err;
  2014. out:
  2015. mutex_unlock(&linecard->state_lock);
  2016. return err;
  2017. }
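/* Unprovision a line card. A card stuck in PROVISIONING_FAILED is reset
 * directly to UNPROVISIONED; an already unprovisioned card is a no-op.
 */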
  2018. static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
  2019. struct netlink_ext_ack *extack)
  2020. {
  2021. int err;
  2022. mutex_lock(&linecard->state_lock);
  2023. if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
  2024. NL_SET_ERR_MSG_MOD(extack, "Line card is currently being provisioned");
  2025. err = -EBUSY;
  2026. goto out;
  2027. }
  2028. if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
  2029. NL_SET_ERR_MSG_MOD(extack, "Line card is currently being unprovisioned");
  2030. err = -EBUSY;
  2031. goto out;
  2032. }
  2033. if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
  2034. linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
  2035. linecard->type = NULL;
  2036. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  2037. err = 0;
  2038. goto out;
  2039. }
  2040. if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) {
  2041. NL_SET_ERR_MSG_MOD(extack, "Line card is not provisioned");
  2042. err = 0;
  2043. goto out;
  2044. }
  2045. linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING;
  2046. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  2047. mutex_unlock(&linecard->state_lock);
  2048. err = linecard->ops->unprovision(linecard, linecard->priv,
  2049. extack);
  2050. if (err) {
  2051. /* Unprovisioning failed. Assume the linecard is unprovisioned
  2052. * for future operations.
  2053. */
  2054. mutex_lock(&linecard->state_lock);
  2055. linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
  2056. linecard->type = NULL;
  2057. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  2058. mutex_unlock(&linecard->state_lock);
  2059. }
  2060. return err;
  2061. out:
  2062. mutex_unlock(&linecard->state_lock);
  2063. return err;
  2064. }
  2065. static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
  2066. struct genl_info *info)
  2067. {
  2068. struct devlink_linecard *linecard = info->user_ptr[1];
  2069. struct netlink_ext_ack *extack = info->extack;
  2070. int err;
  2071. if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) {
  2072. const char *type;
  2073. type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]);
  2074. if (strcmp(type, "")) {
  2075. err = devlink_linecard_type_set(linecard, type, extack);
  2076. if (err)
  2077. return err;
  2078. } else {
  2079. err = devlink_linecard_type_unset(linecard, extack);
  2080. if (err)
  2081. return err;
  2082. }
  2083. }
  2084. return 0;
  2085. }
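/* Shared buffer (SB) object: report index, size and the ingress/egress
 * pool and TC counts of one buffer of the device.
 */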
  2086. static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
  2087. struct devlink_sb *devlink_sb,
  2088. enum devlink_command cmd, u32 portid,
  2089. u32 seq, int flags)
  2090. {
  2091. void *hdr;
  2092. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  2093. if (!hdr)
  2094. return -EMSGSIZE;
  2095. if (devlink_nl_put_handle(msg, devlink))
  2096. goto nla_put_failure;
  2097. if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
  2098. goto nla_put_failure;
  2099. if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
  2100. goto nla_put_failure;
  2101. if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
  2102. devlink_sb->ingress_pools_count))
  2103. goto nla_put_failure;
  2104. if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
  2105. devlink_sb->egress_pools_count))
  2106. goto nla_put_failure;
  2107. if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
  2108. devlink_sb->ingress_tc_count))
  2109. goto nla_put_failure;
  2110. if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
  2111. devlink_sb->egress_tc_count))
  2112. goto nla_put_failure;
  2113. genlmsg_end(msg, hdr);
  2114. return 0;
  2115. nla_put_failure:
  2116. genlmsg_cancel(msg, hdr);
  2117. return -EMSGSIZE;
  2118. }
  2119. static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
  2120. struct genl_info *info)
  2121. {
  2122. struct devlink *devlink = info->user_ptr[0];
  2123. struct devlink_sb *devlink_sb;
  2124. struct sk_buff *msg;
  2125. int err;
  2126. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2127. if (IS_ERR(devlink_sb))
  2128. return PTR_ERR(devlink_sb);
  2129. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  2130. if (!msg)
  2131. return -ENOMEM;
  2132. err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
  2133. DEVLINK_CMD_SB_NEW,
  2134. info->snd_portid, info->snd_seq, 0);
  2135. if (err) {
  2136. nlmsg_free(msg);
  2137. return err;
  2138. }
  2139. return genlmsg_reply(msg, info);
  2140. }
  2141. static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
  2142. struct netlink_callback *cb)
  2143. {
  2144. struct devlink *devlink;
  2145. struct devlink_sb *devlink_sb;
  2146. int start = cb->args[0];
  2147. unsigned long index;
  2148. int idx = 0;
  2149. int err;
  2150. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  2151. devl_lock(devlink);
  2152. list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
  2153. if (idx < start) {
  2154. idx++;
  2155. continue;
  2156. }
  2157. err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
  2158. DEVLINK_CMD_SB_NEW,
  2159. NETLINK_CB(cb->skb).portid,
  2160. cb->nlh->nlmsg_seq,
  2161. NLM_F_MULTI);
  2162. if (err) {
  2163. devl_unlock(devlink);
  2164. devlink_put(devlink);
  2165. goto out;
  2166. }
  2167. idx++;
  2168. }
  2169. devl_unlock(devlink);
  2170. devlink_put(devlink);
  2171. }
  2172. out:
  2173. cb->args[0] = idx;
  2174. return msg->len;
  2175. }
  2176. static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
  2177. struct devlink_sb *devlink_sb,
  2178. u16 pool_index, enum devlink_command cmd,
  2179. u32 portid, u32 seq, int flags)
  2180. {
  2181. struct devlink_sb_pool_info pool_info;
  2182. void *hdr;
  2183. int err;
  2184. err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
  2185. pool_index, &pool_info);
  2186. if (err)
  2187. return err;
  2188. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  2189. if (!hdr)
  2190. return -EMSGSIZE;
  2191. if (devlink_nl_put_handle(msg, devlink))
  2192. goto nla_put_failure;
  2193. if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
  2194. goto nla_put_failure;
  2195. if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
  2196. goto nla_put_failure;
  2197. if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
  2198. goto nla_put_failure;
  2199. if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
  2200. goto nla_put_failure;
  2201. if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
  2202. pool_info.threshold_type))
  2203. goto nla_put_failure;
  2204. if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE,
  2205. pool_info.cell_size))
  2206. goto nla_put_failure;
  2207. genlmsg_end(msg, hdr);
  2208. return 0;
  2209. nla_put_failure:
  2210. genlmsg_cancel(msg, hdr);
  2211. return -EMSGSIZE;
  2212. }
  2213. static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
  2214. struct genl_info *info)
  2215. {
  2216. struct devlink *devlink = info->user_ptr[0];
  2217. struct devlink_sb *devlink_sb;
  2218. struct sk_buff *msg;
  2219. u16 pool_index;
  2220. int err;
  2221. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2222. if (IS_ERR(devlink_sb))
  2223. return PTR_ERR(devlink_sb);
  2224. err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
  2225. &pool_index);
  2226. if (err)
  2227. return err;
  2228. if (!devlink->ops->sb_pool_get)
  2229. return -EOPNOTSUPP;
  2230. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  2231. if (!msg)
  2232. return -ENOMEM;
  2233. err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
  2234. DEVLINK_CMD_SB_POOL_NEW,
  2235. info->snd_portid, info->snd_seq, 0);
  2236. if (err) {
  2237. nlmsg_free(msg);
  2238. return err;
  2239. }
  2240. return genlmsg_reply(msg, info);
  2241. }
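/* Dump helper iterating all pools of one shared buffer, honouring the
 * dump resume index in *p_idx.
 */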
  2242. static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
  2243. struct devlink *devlink,
  2244. struct devlink_sb *devlink_sb,
  2245. u32 portid, u32 seq)
  2246. {
  2247. u16 pool_count = devlink_sb_pool_count(devlink_sb);
  2248. u16 pool_index;
  2249. int err;
  2250. for (pool_index = 0; pool_index < pool_count; pool_index++) {
  2251. if (*p_idx < start) {
  2252. (*p_idx)++;
  2253. continue;
  2254. }
  2255. err = devlink_nl_sb_pool_fill(msg, devlink,
  2256. devlink_sb,
  2257. pool_index,
  2258. DEVLINK_CMD_SB_POOL_NEW,
  2259. portid, seq, NLM_F_MULTI);
  2260. if (err)
  2261. return err;
  2262. (*p_idx)++;
  2263. }
  2264. return 0;
  2265. }
  2266. static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
  2267. struct netlink_callback *cb)
  2268. {
  2269. struct devlink *devlink;
  2270. struct devlink_sb *devlink_sb;
  2271. int start = cb->args[0];
  2272. unsigned long index;
  2273. int idx = 0;
  2274. int err = 0;
  2275. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  2276. if (!devlink->ops->sb_pool_get)
  2277. goto retry;
  2278. devl_lock(devlink);
  2279. list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
  2280. err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
  2281. devlink_sb,
  2282. NETLINK_CB(cb->skb).portid,
  2283. cb->nlh->nlmsg_seq);
  2284. if (err == -EOPNOTSUPP) {
  2285. err = 0;
  2286. } else if (err) {
  2287. devl_unlock(devlink);
  2288. devlink_put(devlink);
  2289. goto out;
  2290. }
  2291. }
  2292. devl_unlock(devlink);
  2293. retry:
  2294. devlink_put(devlink);
  2295. }
  2296. out:
  2297. if (err != -EMSGSIZE)
  2298. return err;
  2299. cb->args[0] = idx;
  2300. return msg->len;
  2301. }
  2302. static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
  2303. u16 pool_index, u32 size,
  2304. enum devlink_sb_threshold_type threshold_type,
  2305. struct netlink_ext_ack *extack)
  2306. {
  2307. const struct devlink_ops *ops = devlink->ops;
  2308. if (ops->sb_pool_set)
  2309. return ops->sb_pool_set(devlink, sb_index, pool_index,
  2310. size, threshold_type, extack);
  2311. return -EOPNOTSUPP;
  2312. }
  2313. static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
  2314. struct genl_info *info)
  2315. {
  2316. struct devlink *devlink = info->user_ptr[0];
  2317. enum devlink_sb_threshold_type threshold_type;
  2318. struct devlink_sb *devlink_sb;
  2319. u16 pool_index;
  2320. u32 size;
  2321. int err;
  2322. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2323. if (IS_ERR(devlink_sb))
  2324. return PTR_ERR(devlink_sb);
  2325. err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
  2326. &pool_index);
  2327. if (err)
  2328. return err;
  2329. err = devlink_sb_th_type_get_from_info(info, &threshold_type);
  2330. if (err)
  2331. return err;
  2332. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_POOL_SIZE))
  2333. return -EINVAL;
  2334. size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
  2335. return devlink_sb_pool_set(devlink, devlink_sb->index,
  2336. pool_index, size, threshold_type,
  2337. info->extack);
  2338. }
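/* Per-port pool binding: threshold plus, when the driver supports it,
 * current and maximal buffer occupancy.
 */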
  2339. static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
  2340. struct devlink *devlink,
  2341. struct devlink_port *devlink_port,
  2342. struct devlink_sb *devlink_sb,
  2343. u16 pool_index,
  2344. enum devlink_command cmd,
  2345. u32 portid, u32 seq, int flags)
  2346. {
  2347. const struct devlink_ops *ops = devlink->ops;
  2348. u32 threshold;
  2349. void *hdr;
  2350. int err;
  2351. err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
  2352. pool_index, &threshold);
  2353. if (err)
  2354. return err;
  2355. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  2356. if (!hdr)
  2357. return -EMSGSIZE;
  2358. if (devlink_nl_put_handle(msg, devlink))
  2359. goto nla_put_failure;
  2360. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
  2361. goto nla_put_failure;
  2362. if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
  2363. goto nla_put_failure;
  2364. if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
  2365. goto nla_put_failure;
  2366. if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
  2367. goto nla_put_failure;
  2368. if (ops->sb_occ_port_pool_get) {
  2369. u32 cur;
  2370. u32 max;
  2371. err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
  2372. pool_index, &cur, &max);
  2373. if (err && err != -EOPNOTSUPP)
  2374. goto sb_occ_get_failure;
  2375. if (!err) {
  2376. if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
  2377. goto nla_put_failure;
  2378. if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
  2379. goto nla_put_failure;
  2380. }
  2381. }
  2382. genlmsg_end(msg, hdr);
  2383. return 0;
  2384. nla_put_failure:
  2385. err = -EMSGSIZE;
  2386. sb_occ_get_failure:
  2387. genlmsg_cancel(msg, hdr);
  2388. return err;
  2389. }
  2390. static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
  2391. struct genl_info *info)
  2392. {
  2393. struct devlink_port *devlink_port = info->user_ptr[1];
  2394. struct devlink *devlink = devlink_port->devlink;
  2395. struct devlink_sb *devlink_sb;
  2396. struct sk_buff *msg;
  2397. u16 pool_index;
  2398. int err;
  2399. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2400. if (IS_ERR(devlink_sb))
  2401. return PTR_ERR(devlink_sb);
  2402. err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
  2403. &pool_index);
  2404. if (err)
  2405. return err;
  2406. if (!devlink->ops->sb_port_pool_get)
  2407. return -EOPNOTSUPP;
  2408. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  2409. if (!msg)
  2410. return -ENOMEM;
  2411. err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
  2412. devlink_sb, pool_index,
  2413. DEVLINK_CMD_SB_PORT_POOL_NEW,
  2414. info->snd_portid, info->snd_seq, 0);
  2415. if (err) {
  2416. nlmsg_free(msg);
  2417. return err;
  2418. }
  2419. return genlmsg_reply(msg, info);
  2420. }
  2421. static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
  2422. struct devlink *devlink,
  2423. struct devlink_sb *devlink_sb,
  2424. u32 portid, u32 seq)
  2425. {
  2426. struct devlink_port *devlink_port;
  2427. u16 pool_count = devlink_sb_pool_count(devlink_sb);
  2428. u16 pool_index;
  2429. int err;
  2430. list_for_each_entry(devlink_port, &devlink->port_list, list) {
  2431. for (pool_index = 0; pool_index < pool_count; pool_index++) {
  2432. if (*p_idx < start) {
  2433. (*p_idx)++;
  2434. continue;
  2435. }
  2436. err = devlink_nl_sb_port_pool_fill(msg, devlink,
  2437. devlink_port,
  2438. devlink_sb,
  2439. pool_index,
  2440. DEVLINK_CMD_SB_PORT_POOL_NEW,
  2441. portid, seq,
  2442. NLM_F_MULTI);
  2443. if (err)
  2444. return err;
  2445. (*p_idx)++;
  2446. }
  2447. }
  2448. return 0;
  2449. }
  2450. static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
  2451. struct netlink_callback *cb)
  2452. {
  2453. struct devlink *devlink;
  2454. struct devlink_sb *devlink_sb;
  2455. int start = cb->args[0];
  2456. unsigned long index;
  2457. int idx = 0;
  2458. int err = 0;
  2459. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  2460. if (!devlink->ops->sb_port_pool_get)
  2461. goto retry;
  2462. devl_lock(devlink);
  2463. list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
  2464. err = __sb_port_pool_get_dumpit(msg, start, &idx,
  2465. devlink, devlink_sb,
  2466. NETLINK_CB(cb->skb).portid,
  2467. cb->nlh->nlmsg_seq);
  2468. if (err == -EOPNOTSUPP) {
  2469. err = 0;
  2470. } else if (err) {
  2471. devl_unlock(devlink);
  2472. devlink_put(devlink);
  2473. goto out;
  2474. }
  2475. }
  2476. devl_unlock(devlink);
  2477. retry:
  2478. devlink_put(devlink);
  2479. }
  2480. out:
  2481. if (err != -EMSGSIZE)
  2482. return err;
  2483. cb->args[0] = idx;
  2484. return msg->len;
  2485. }
  2486. static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
  2487. unsigned int sb_index, u16 pool_index,
  2488. u32 threshold,
  2489. struct netlink_ext_ack *extack)
  2490. {
  2491. const struct devlink_ops *ops = devlink_port->devlink->ops;
  2492. if (ops->sb_port_pool_set)
  2493. return ops->sb_port_pool_set(devlink_port, sb_index,
  2494. pool_index, threshold, extack);
  2495. return -EOPNOTSUPP;
  2496. }
  2497. static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
  2498. struct genl_info *info)
  2499. {
  2500. struct devlink_port *devlink_port = info->user_ptr[1];
  2501. struct devlink *devlink = info->user_ptr[0];
  2502. struct devlink_sb *devlink_sb;
  2503. u16 pool_index;
  2504. u32 threshold;
  2505. int err;
  2506. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2507. if (IS_ERR(devlink_sb))
  2508. return PTR_ERR(devlink_sb);
  2509. err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
  2510. &pool_index);
  2511. if (err)
  2512. return err;
  2513. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
  2514. return -EINVAL;
  2515. threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
  2516. return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
  2517. pool_index, threshold, info->extack);
  2518. }
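/* Per-port, per-TC binding to a pool: pool index, threshold and optional
 * occupancy counters, mirroring the port/pool variant above.
 */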
  2519. static int
  2520. devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
  2521. struct devlink_port *devlink_port,
  2522. struct devlink_sb *devlink_sb, u16 tc_index,
  2523. enum devlink_sb_pool_type pool_type,
  2524. enum devlink_command cmd,
  2525. u32 portid, u32 seq, int flags)
  2526. {
  2527. const struct devlink_ops *ops = devlink->ops;
  2528. u16 pool_index;
  2529. u32 threshold;
  2530. void *hdr;
  2531. int err;
  2532. err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
  2533. tc_index, pool_type,
  2534. &pool_index, &threshold);
  2535. if (err)
  2536. return err;
  2537. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  2538. if (!hdr)
  2539. return -EMSGSIZE;
  2540. if (devlink_nl_put_handle(msg, devlink))
  2541. goto nla_put_failure;
  2542. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
  2543. goto nla_put_failure;
  2544. if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
  2545. goto nla_put_failure;
  2546. if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
  2547. goto nla_put_failure;
  2548. if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
  2549. goto nla_put_failure;
  2550. if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
  2551. goto nla_put_failure;
  2552. if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
  2553. goto nla_put_failure;
  2554. if (ops->sb_occ_tc_port_bind_get) {
  2555. u32 cur;
  2556. u32 max;
  2557. err = ops->sb_occ_tc_port_bind_get(devlink_port,
  2558. devlink_sb->index,
  2559. tc_index, pool_type,
  2560. &cur, &max);
  2561. if (err && err != -EOPNOTSUPP)
goto sb_occ_get_failure;
  2563. if (!err) {
  2564. if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
  2565. goto nla_put_failure;
  2566. if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
  2567. goto nla_put_failure;
  2568. }
  2569. }
  2570. genlmsg_end(msg, hdr);
  2571. return 0;
nla_put_failure:
err = -EMSGSIZE;
sb_occ_get_failure:
genlmsg_cancel(msg, hdr);
return err;
  2575. }
  2576. static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
  2577. struct genl_info *info)
  2578. {
  2579. struct devlink_port *devlink_port = info->user_ptr[1];
  2580. struct devlink *devlink = devlink_port->devlink;
  2581. struct devlink_sb *devlink_sb;
  2582. struct sk_buff *msg;
  2583. enum devlink_sb_pool_type pool_type;
  2584. u16 tc_index;
  2585. int err;
  2586. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2587. if (IS_ERR(devlink_sb))
  2588. return PTR_ERR(devlink_sb);
  2589. err = devlink_sb_pool_type_get_from_info(info, &pool_type);
  2590. if (err)
  2591. return err;
  2592. err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
  2593. pool_type, &tc_index);
  2594. if (err)
  2595. return err;
  2596. if (!devlink->ops->sb_tc_pool_bind_get)
  2597. return -EOPNOTSUPP;
  2598. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  2599. if (!msg)
  2600. return -ENOMEM;
  2601. err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
  2602. devlink_sb, tc_index, pool_type,
  2603. DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
  2604. info->snd_portid,
  2605. info->snd_seq, 0);
  2606. if (err) {
  2607. nlmsg_free(msg);
  2608. return err;
  2609. }
  2610. return genlmsg_reply(msg, info);
  2611. }
  2612. static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
  2613. int start, int *p_idx,
  2614. struct devlink *devlink,
  2615. struct devlink_sb *devlink_sb,
  2616. u32 portid, u32 seq)
  2617. {
  2618. struct devlink_port *devlink_port;
  2619. u16 tc_index;
  2620. int err;
  2621. list_for_each_entry(devlink_port, &devlink->port_list, list) {
  2622. for (tc_index = 0;
  2623. tc_index < devlink_sb->ingress_tc_count; tc_index++) {
  2624. if (*p_idx < start) {
  2625. (*p_idx)++;
  2626. continue;
  2627. }
  2628. err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
  2629. devlink_port,
  2630. devlink_sb,
  2631. tc_index,
  2632. DEVLINK_SB_POOL_TYPE_INGRESS,
  2633. DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
  2634. portid, seq,
  2635. NLM_F_MULTI);
  2636. if (err)
  2637. return err;
  2638. (*p_idx)++;
  2639. }
  2640. for (tc_index = 0;
  2641. tc_index < devlink_sb->egress_tc_count; tc_index++) {
  2642. if (*p_idx < start) {
  2643. (*p_idx)++;
  2644. continue;
  2645. }
  2646. err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
  2647. devlink_port,
  2648. devlink_sb,
  2649. tc_index,
  2650. DEVLINK_SB_POOL_TYPE_EGRESS,
  2651. DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
  2652. portid, seq,
  2653. NLM_F_MULTI);
  2654. if (err)
  2655. return err;
  2656. (*p_idx)++;
  2657. }
  2658. }
  2659. return 0;
  2660. }
  2661. static int
  2662. devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
  2663. struct netlink_callback *cb)
  2664. {
  2665. struct devlink *devlink;
  2666. struct devlink_sb *devlink_sb;
  2667. int start = cb->args[0];
  2668. unsigned long index;
  2669. int idx = 0;
  2670. int err = 0;
  2671. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  2672. if (!devlink->ops->sb_tc_pool_bind_get)
  2673. goto retry;
  2674. devl_lock(devlink);
  2675. list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
  2676. err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
  2677. devlink,
  2678. devlink_sb,
  2679. NETLINK_CB(cb->skb).portid,
  2680. cb->nlh->nlmsg_seq);
  2681. if (err == -EOPNOTSUPP) {
  2682. err = 0;
  2683. } else if (err) {
  2684. devl_unlock(devlink);
  2685. devlink_put(devlink);
  2686. goto out;
  2687. }
  2688. }
  2689. devl_unlock(devlink);
  2690. retry:
  2691. devlink_put(devlink);
  2692. }
  2693. out:
  2694. if (err != -EMSGSIZE)
  2695. return err;
  2696. cb->args[0] = idx;
  2697. return msg->len;
  2698. }
  2699. static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
  2700. unsigned int sb_index, u16 tc_index,
  2701. enum devlink_sb_pool_type pool_type,
  2702. u16 pool_index, u32 threshold,
  2703. struct netlink_ext_ack *extack)
  2704. {
  2705. const struct devlink_ops *ops = devlink_port->devlink->ops;
  2706. if (ops->sb_tc_pool_bind_set)
  2707. return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
  2708. tc_index, pool_type,
  2709. pool_index, threshold, extack);
  2710. return -EOPNOTSUPP;
  2711. }
  2712. static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
  2713. struct genl_info *info)
  2714. {
  2715. struct devlink_port *devlink_port = info->user_ptr[1];
  2716. struct devlink *devlink = info->user_ptr[0];
  2717. enum devlink_sb_pool_type pool_type;
  2718. struct devlink_sb *devlink_sb;
  2719. u16 tc_index;
  2720. u16 pool_index;
  2721. u32 threshold;
  2722. int err;
  2723. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2724. if (IS_ERR(devlink_sb))
  2725. return PTR_ERR(devlink_sb);
  2726. err = devlink_sb_pool_type_get_from_info(info, &pool_type);
  2727. if (err)
  2728. return err;
  2729. err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
  2730. pool_type, &tc_index);
  2731. if (err)
  2732. return err;
  2733. err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
  2734. &pool_index);
  2735. if (err)
  2736. return err;
  2737. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
  2738. return -EINVAL;
  2739. threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
  2740. return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
  2741. tc_index, pool_type,
  2742. pool_index, threshold, info->extack);
  2743. }
  2744. static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
  2745. struct genl_info *info)
  2746. {
  2747. struct devlink *devlink = info->user_ptr[0];
  2748. const struct devlink_ops *ops = devlink->ops;
  2749. struct devlink_sb *devlink_sb;
  2750. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2751. if (IS_ERR(devlink_sb))
  2752. return PTR_ERR(devlink_sb);
  2753. if (ops->sb_occ_snapshot)
  2754. return ops->sb_occ_snapshot(devlink, devlink_sb->index);
  2755. return -EOPNOTSUPP;
  2756. }
  2757. static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
  2758. struct genl_info *info)
  2759. {
  2760. struct devlink *devlink = info->user_ptr[0];
  2761. const struct devlink_ops *ops = devlink->ops;
  2762. struct devlink_sb *devlink_sb;
  2763. devlink_sb = devlink_sb_get_from_info(devlink, info);
  2764. if (IS_ERR(devlink_sb))
  2765. return PTR_ERR(devlink_sb);
  2766. if (ops->sb_occ_max_clear)
  2767. return ops->sb_occ_max_clear(devlink, devlink_sb->index);
  2768. return -EOPNOTSUPP;
  2769. }
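/* Report the current eswitch mode, inline mode and encap mode, skipping
 * any attribute whose getter the driver does not implement.
 */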
  2770. static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
  2771. enum devlink_command cmd, u32 portid,
  2772. u32 seq, int flags)
  2773. {
  2774. const struct devlink_ops *ops = devlink->ops;
  2775. enum devlink_eswitch_encap_mode encap_mode;
  2776. u8 inline_mode;
  2777. void *hdr;
  2778. int err = 0;
  2779. u16 mode;
  2780. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  2781. if (!hdr)
  2782. return -EMSGSIZE;
  2783. err = devlink_nl_put_handle(msg, devlink);
  2784. if (err)
  2785. goto nla_put_failure;
  2786. if (ops->eswitch_mode_get) {
  2787. err = ops->eswitch_mode_get(devlink, &mode);
  2788. if (err)
  2789. goto nla_put_failure;
  2790. err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
  2791. if (err)
  2792. goto nla_put_failure;
  2793. }
  2794. if (ops->eswitch_inline_mode_get) {
  2795. err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
  2796. if (err)
  2797. goto nla_put_failure;
  2798. err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
  2799. inline_mode);
  2800. if (err)
  2801. goto nla_put_failure;
  2802. }
  2803. if (ops->eswitch_encap_mode_get) {
  2804. err = ops->eswitch_encap_mode_get(devlink, &encap_mode);
  2805. if (err)
  2806. goto nla_put_failure;
  2807. err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode);
  2808. if (err)
  2809. goto nla_put_failure;
  2810. }
  2811. genlmsg_end(msg, hdr);
  2812. return 0;
  2813. nla_put_failure:
  2814. genlmsg_cancel(msg, hdr);
  2815. return err;
  2816. }
  2817. static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
  2818. struct genl_info *info)
  2819. {
  2820. struct devlink *devlink = info->user_ptr[0];
  2821. struct sk_buff *msg;
  2822. int err;
  2823. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  2824. if (!msg)
  2825. return -ENOMEM;
  2826. err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET,
  2827. info->snd_portid, info->snd_seq, 0);
  2828. if (err) {
  2829. nlmsg_free(msg);
  2830. return err;
  2831. }
  2832. return genlmsg_reply(msg, info);
  2833. }
  2834. static int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
  2835. struct netlink_ext_ack *extack)
  2836. {
  2837. struct devlink_rate *devlink_rate;
  2838. list_for_each_entry(devlink_rate, &devlink->rate_list, list)
  2839. if (devlink_rate_is_node(devlink_rate)) {
  2840. NL_SET_ERR_MSG_MOD(extack, "Rate node(s) exists.");
  2841. return -EBUSY;
  2842. }
  2843. return 0;
  2844. }
  2845. static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
  2846. struct genl_info *info)
  2847. {
  2848. struct devlink *devlink = info->user_ptr[0];
  2849. const struct devlink_ops *ops = devlink->ops;
  2850. enum devlink_eswitch_encap_mode encap_mode;
  2851. u8 inline_mode;
  2852. int err = 0;
  2853. u16 mode;
  2854. if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
  2855. if (!ops->eswitch_mode_set)
  2856. return -EOPNOTSUPP;
  2857. mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
  2858. err = devlink_rate_nodes_check(devlink, mode, info->extack);
  2859. if (err)
  2860. return err;
  2861. err = ops->eswitch_mode_set(devlink, mode, info->extack);
  2862. if (err)
  2863. return err;
  2864. }
  2865. if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) {
  2866. if (!ops->eswitch_inline_mode_set)
  2867. return -EOPNOTSUPP;
  2868. inline_mode = nla_get_u8(
  2869. info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
  2870. err = ops->eswitch_inline_mode_set(devlink, inline_mode,
  2871. info->extack);
  2872. if (err)
  2873. return err;
  2874. }
  2875. if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) {
  2876. if (!ops->eswitch_encap_mode_set)
  2877. return -EOPNOTSUPP;
  2878. encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
  2879. err = ops->eswitch_encap_mode_set(devlink, encap_mode,
  2880. info->extack);
  2881. if (err)
  2882. return err;
  2883. }
  2884. return 0;
  2885. }
  2886. int devlink_dpipe_match_put(struct sk_buff *skb,
  2887. struct devlink_dpipe_match *match)
  2888. {
  2889. struct devlink_dpipe_header *header = match->header;
  2890. struct devlink_dpipe_field *field = &header->fields[match->field_id];
  2891. struct nlattr *match_attr;
  2892. match_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_MATCH);
  2893. if (!match_attr)
  2894. return -EMSGSIZE;
  2895. if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) ||
  2896. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) ||
  2897. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
  2898. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
  2899. nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
  2900. goto nla_put_failure;
  2901. nla_nest_end(skb, match_attr);
  2902. return 0;
  2903. nla_put_failure:
  2904. nla_nest_cancel(skb, match_attr);
  2905. return -EMSGSIZE;
  2906. }
  2907. EXPORT_SYMBOL_GPL(devlink_dpipe_match_put);
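/* Example (editorial addition, not part of the original file): a minimal
 * sketch of a driver ->matches_dump() callback using
 * devlink_dpipe_match_put(). The foo_ prefix marks hypothetical names;
 * the ethernet header and field are the generic ones from devlink.h.
 */
#if 0
static int foo_dpipe_table_matches_dump(void *priv, struct sk_buff *skb)
{
	struct devlink_dpipe_match match = {
		.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT,
		.header = &devlink_dpipe_header_ethernet,
		.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
		.header_index = 0,
	};

	/* Emit a single exact-match on the destination MAC field. */
	return devlink_dpipe_match_put(skb, &match);
}
#endif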
  2908. static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
  2909. struct sk_buff *skb)
  2910. {
  2911. struct nlattr *matches_attr;
  2912. matches_attr = nla_nest_start_noflag(skb,
  2913. DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
  2914. if (!matches_attr)
  2915. return -EMSGSIZE;
  2916. if (table->table_ops->matches_dump(table->priv, skb))
  2917. goto nla_put_failure;
  2918. nla_nest_end(skb, matches_attr);
  2919. return 0;
  2920. nla_put_failure:
  2921. nla_nest_cancel(skb, matches_attr);
  2922. return -EMSGSIZE;
  2923. }
  2924. int devlink_dpipe_action_put(struct sk_buff *skb,
  2925. struct devlink_dpipe_action *action)
  2926. {
  2927. struct devlink_dpipe_header *header = action->header;
  2928. struct devlink_dpipe_field *field = &header->fields[action->field_id];
  2929. struct nlattr *action_attr;
  2930. action_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ACTION);
  2931. if (!action_attr)
  2932. return -EMSGSIZE;
  2933. if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) ||
  2934. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) ||
  2935. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
  2936. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
  2937. nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
  2938. goto nla_put_failure;
  2939. nla_nest_end(skb, action_attr);
  2940. return 0;
  2941. nla_put_failure:
  2942. nla_nest_cancel(skb, action_attr);
  2943. return -EMSGSIZE;
  2944. }
  2945. EXPORT_SYMBOL_GPL(devlink_dpipe_action_put);
  2946. static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
  2947. struct sk_buff *skb)
  2948. {
  2949. struct nlattr *actions_attr;
  2950. actions_attr = nla_nest_start_noflag(skb,
  2951. DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
  2952. if (!actions_attr)
  2953. return -EMSGSIZE;
  2954. if (table->table_ops->actions_dump(table->priv, skb))
  2955. goto nla_put_failure;
  2956. nla_nest_end(skb, actions_attr);
  2957. return 0;
  2958. nla_put_failure:
  2959. nla_nest_cancel(skb, actions_attr);
  2960. return -EMSGSIZE;
  2961. }
  2962. static int devlink_dpipe_table_put(struct sk_buff *skb,
  2963. struct devlink_dpipe_table *table)
  2964. {
  2965. struct nlattr *table_attr;
  2966. u64 table_size;
  2967. table_size = table->table_ops->size_get(table->priv);
  2968. table_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLE);
  2969. if (!table_attr)
  2970. return -EMSGSIZE;
  2971. if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
  2972. nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size,
  2973. DEVLINK_ATTR_PAD))
  2974. goto nla_put_failure;
  2975. if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
  2976. table->counters_enabled))
  2977. goto nla_put_failure;
  2978. if (table->resource_valid) {
  2979. if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
  2980. table->resource_id, DEVLINK_ATTR_PAD) ||
  2981. nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
  2982. table->resource_units, DEVLINK_ATTR_PAD))
  2983. goto nla_put_failure;
  2984. }
  2985. if (devlink_dpipe_matches_put(table, skb))
  2986. goto nla_put_failure;
  2987. if (devlink_dpipe_actions_put(table, skb))
  2988. goto nla_put_failure;
  2989. nla_nest_end(skb, table_attr);
  2990. return 0;
  2991. nla_put_failure:
  2992. nla_nest_cancel(skb, table_attr);
  2993. return -EMSGSIZE;
  2994. }
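/* Flush the current skb (if any) as a reply and allocate a fresh one.
 * This lets the dpipe fill helpers emit arbitrarily large dumps as a
 * sequence of multipart messages from a doit handler.
 */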
  2995. static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb,
  2996. struct genl_info *info)
  2997. {
  2998. int err;
  2999. if (*pskb) {
  3000. err = genlmsg_reply(*pskb, info);
  3001. if (err)
  3002. return err;
  3003. }
  3004. *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
  3005. if (!*pskb)
  3006. return -ENOMEM;
  3007. return 0;
  3008. }
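/* Dump dpipe tables, optionally filtered by name. When a table does not
 * fit into the current message, the message is sent and the loop restarts
 * from that table with a fresh skb (the "incomplete" path).
 */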
  3009. static int devlink_dpipe_tables_fill(struct genl_info *info,
  3010. enum devlink_command cmd, int flags,
  3011. struct list_head *dpipe_tables,
  3012. const char *table_name)
  3013. {
  3014. struct devlink *devlink = info->user_ptr[0];
  3015. struct devlink_dpipe_table *table;
  3016. struct nlattr *tables_attr;
  3017. struct sk_buff *skb = NULL;
  3018. struct nlmsghdr *nlh;
  3019. bool incomplete;
  3020. void *hdr;
  3021. int i;
  3022. int err;
  3023. table = list_first_entry(dpipe_tables,
  3024. struct devlink_dpipe_table, list);
  3025. start_again:
  3026. err = devlink_dpipe_send_and_alloc_skb(&skb, info);
  3027. if (err)
  3028. return err;
  3029. hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
  3030. &devlink_nl_family, NLM_F_MULTI, cmd);
  3031. if (!hdr) {
  3032. nlmsg_free(skb);
  3033. return -EMSGSIZE;
  3034. }
  3035. if (devlink_nl_put_handle(skb, devlink))
  3036. goto nla_put_failure;
  3037. tables_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLES);
  3038. if (!tables_attr)
  3039. goto nla_put_failure;
  3040. i = 0;
  3041. incomplete = false;
  3042. list_for_each_entry_from(table, dpipe_tables, list) {
  3043. if (!table_name) {
  3044. err = devlink_dpipe_table_put(skb, table);
  3045. if (err) {
  3046. if (!i)
  3047. goto err_table_put;
  3048. incomplete = true;
  3049. break;
  3050. }
  3051. } else {
  3052. if (!strcmp(table->name, table_name)) {
  3053. err = devlink_dpipe_table_put(skb, table);
  3054. if (err)
  3055. break;
  3056. }
  3057. }
  3058. i++;
  3059. }
  3060. nla_nest_end(skb, tables_attr);
  3061. genlmsg_end(skb, hdr);
  3062. if (incomplete)
  3063. goto start_again;
  3064. send_done:
  3065. nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
  3066. NLMSG_DONE, 0, flags | NLM_F_MULTI);
  3067. if (!nlh) {
  3068. err = devlink_dpipe_send_and_alloc_skb(&skb, info);
  3069. if (err)
  3070. return err;
  3071. goto send_done;
  3072. }
  3073. return genlmsg_reply(skb, info);
  3074. nla_put_failure:
  3075. err = -EMSGSIZE;
  3076. err_table_put:
  3077. nlmsg_free(skb);
  3078. return err;
  3079. }
  3080. static int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb,
  3081. struct genl_info *info)
  3082. {
  3083. struct devlink *devlink = info->user_ptr[0];
  3084. const char *table_name = NULL;
  3085. if (info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME])
  3086. table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
  3087. return devlink_dpipe_tables_fill(info, DEVLINK_CMD_DPIPE_TABLE_GET, 0,
  3088. &devlink->dpipe_table_list,
  3089. table_name);
  3090. }
  3091. static int devlink_dpipe_value_put(struct sk_buff *skb,
  3092. struct devlink_dpipe_value *value)
  3093. {
  3094. if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE,
  3095. value->value_size, value->value))
  3096. return -EMSGSIZE;
  3097. if (value->mask)
  3098. if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK,
  3099. value->value_size, value->mask))
  3100. return -EMSGSIZE;
  3101. if (value->mapping_valid)
  3102. if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING,
  3103. value->mapping_value))
  3104. return -EMSGSIZE;
  3105. return 0;
  3106. }
  3107. static int devlink_dpipe_action_value_put(struct sk_buff *skb,
  3108. struct devlink_dpipe_value *value)
  3109. {
  3110. if (!value->action)
  3111. return -EINVAL;
  3112. if (devlink_dpipe_action_put(skb, value->action))
  3113. return -EMSGSIZE;
  3114. if (devlink_dpipe_value_put(skb, value))
  3115. return -EMSGSIZE;
  3116. return 0;
  3117. }
  3118. static int devlink_dpipe_action_values_put(struct sk_buff *skb,
  3119. struct devlink_dpipe_value *values,
  3120. unsigned int values_count)
  3121. {
  3122. struct nlattr *action_attr;
  3123. int i;
  3124. int err;
  3125. for (i = 0; i < values_count; i++) {
  3126. action_attr = nla_nest_start_noflag(skb,
  3127. DEVLINK_ATTR_DPIPE_ACTION_VALUE);
  3128. if (!action_attr)
  3129. return -EMSGSIZE;
  3130. err = devlink_dpipe_action_value_put(skb, &values[i]);
  3131. if (err)
  3132. goto err_action_value_put;
  3133. nla_nest_end(skb, action_attr);
  3134. }
  3135. return 0;
  3136. err_action_value_put:
  3137. nla_nest_cancel(skb, action_attr);
  3138. return err;
  3139. }
  3140. static int devlink_dpipe_match_value_put(struct sk_buff *skb,
  3141. struct devlink_dpipe_value *value)
  3142. {
  3143. if (!value->match)
  3144. return -EINVAL;
  3145. if (devlink_dpipe_match_put(skb, value->match))
  3146. return -EMSGSIZE;
  3147. if (devlink_dpipe_value_put(skb, value))
  3148. return -EMSGSIZE;
  3149. return 0;
  3150. }
  3151. static int devlink_dpipe_match_values_put(struct sk_buff *skb,
  3152. struct devlink_dpipe_value *values,
  3153. unsigned int values_count)
  3154. {
  3155. struct nlattr *match_attr;
  3156. int i;
  3157. int err;
  3158. for (i = 0; i < values_count; i++) {
  3159. match_attr = nla_nest_start_noflag(skb,
  3160. DEVLINK_ATTR_DPIPE_MATCH_VALUE);
  3161. if (!match_attr)
  3162. return -EMSGSIZE;
  3163. err = devlink_dpipe_match_value_put(skb, &values[i]);
  3164. if (err)
  3165. goto err_match_value_put;
  3166. nla_nest_end(skb, match_attr);
  3167. }
  3168. return 0;
  3169. err_match_value_put:
  3170. nla_nest_cancel(skb, match_attr);
  3171. return err;
  3172. }
  3173. static int devlink_dpipe_entry_put(struct sk_buff *skb,
  3174. struct devlink_dpipe_entry *entry)
  3175. {
  3176. struct nlattr *entry_attr, *matches_attr, *actions_attr;
  3177. int err;
  3178. entry_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ENTRY);
  3179. if (!entry_attr)
  3180. return -EMSGSIZE;
  3181. if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
  3182. DEVLINK_ATTR_PAD))
  3183. goto nla_put_failure;
  3184. if (entry->counter_valid)
  3185. if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
  3186. entry->counter, DEVLINK_ATTR_PAD))
  3187. goto nla_put_failure;
  3188. matches_attr = nla_nest_start_noflag(skb,
  3189. DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
  3190. if (!matches_attr)
  3191. goto nla_put_failure;
  3192. err = devlink_dpipe_match_values_put(skb, entry->match_values,
  3193. entry->match_values_count);
  3194. if (err) {
  3195. nla_nest_cancel(skb, matches_attr);
  3196. goto err_match_values_put;
  3197. }
  3198. nla_nest_end(skb, matches_attr);
  3199. actions_attr = nla_nest_start_noflag(skb,
  3200. DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
  3201. if (!actions_attr)
  3202. goto nla_put_failure;
  3203. err = devlink_dpipe_action_values_put(skb, entry->action_values,
  3204. entry->action_values_count);
  3205. if (err) {
  3206. nla_nest_cancel(skb, actions_attr);
  3207. goto err_action_values_put;
  3208. }
  3209. nla_nest_end(skb, actions_attr);
  3210. nla_nest_end(skb, entry_attr);
  3211. return 0;
  3212. nla_put_failure:
  3213. err = -EMSGSIZE;
  3214. err_match_values_put:
  3215. err_action_values_put:
  3216. nla_nest_cancel(skb, entry_attr);
  3217. return err;
  3218. }
  3219. static struct devlink_dpipe_table *
  3220. devlink_dpipe_table_find(struct list_head *dpipe_tables,
  3221. const char *table_name, struct devlink *devlink)
  3222. {
  3223. struct devlink_dpipe_table *table;
  3224. list_for_each_entry_rcu(table, dpipe_tables, list,
  3225. lockdep_is_held(&devlink->lock)) {
  3226. if (!strcmp(table->name, table_name))
  3227. return table;
  3228. }
  3229. return NULL;
  3230. }
  3231. int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
  3232. {
  3233. struct devlink *devlink;
  3234. int err;
  3235. err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb,
  3236. dump_ctx->info);
  3237. if (err)
  3238. return err;
  3239. dump_ctx->hdr = genlmsg_put(dump_ctx->skb,
  3240. dump_ctx->info->snd_portid,
  3241. dump_ctx->info->snd_seq,
  3242. &devlink_nl_family, NLM_F_MULTI,
  3243. dump_ctx->cmd);
  3244. if (!dump_ctx->hdr)
  3245. goto nla_put_failure;
  3246. devlink = dump_ctx->info->user_ptr[0];
  3247. if (devlink_nl_put_handle(dump_ctx->skb, devlink))
  3248. goto nla_put_failure;
  3249. dump_ctx->nest = nla_nest_start_noflag(dump_ctx->skb,
  3250. DEVLINK_ATTR_DPIPE_ENTRIES);
  3251. if (!dump_ctx->nest)
  3252. goto nla_put_failure;
  3253. return 0;
  3254. nla_put_failure:
  3255. nlmsg_free(dump_ctx->skb);
  3256. return -EMSGSIZE;
  3257. }
  3258. EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_prepare);
  3259. int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
  3260. struct devlink_dpipe_entry *entry)
  3261. {
  3262. return devlink_dpipe_entry_put(dump_ctx->skb, entry);
  3263. }
  3264. EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_append);
  3265. int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
  3266. {
  3267. nla_nest_end(dump_ctx->skb, dump_ctx->nest);
  3268. genlmsg_end(dump_ctx->skb, dump_ctx->hdr);
  3269. return 0;
  3270. }
  3271. EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_close);
void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry)
{
        unsigned int value_count, value_index;
        struct devlink_dpipe_value *value;

        value = entry->action_values;
        value_count = entry->action_values_count;
        for (value_index = 0; value_index < value_count; value_index++) {
                kfree(value[value_index].value);
                kfree(value[value_index].mask);
        }

        value = entry->match_values;
        value_count = entry->match_values_count;
        for (value_index = 0; value_index < value_count; value_index++) {
                kfree(value[value_index].value);
                kfree(value[value_index].mask);
        }
}
EXPORT_SYMBOL_GPL(devlink_dpipe_entry_clear);
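/* A minimal sketch of how a driver's ->entries_dump() callback is expected to
 * drive the ctx helpers above. The my_tbl name, entry layout and error
 * handling are hypothetical and for illustration only (a real implementation,
 * e.g. in mlxsw, also re-prepares the ctx when _ctx_append() runs out of skb
 * space):
 *
 *      static int my_tbl_entries_dump(void *priv, bool counters_enabled,
 *                                     struct devlink_dpipe_dump_ctx *dump_ctx)
 *      {
 *              struct devlink_dpipe_entry entry = {};
 *              int err;
 *
 *              err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
 *              if (err)
 *                      return err;
 *              // fill entry.index, entry.match_values(_count),
 *              // entry.action_values(_count) and, if counters_enabled,
 *              // entry.counter/counter_valid from hardware state
 *              err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
 *              if (err)
 *                      goto out;
 *              err = devlink_dpipe_entry_ctx_close(dump_ctx);
 *      out:
 *              devlink_dpipe_entry_clear(&entry);
 *              return err;
 *      }
 */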
  3290. static int devlink_dpipe_entries_fill(struct genl_info *info,
  3291. enum devlink_command cmd, int flags,
  3292. struct devlink_dpipe_table *table)
  3293. {
  3294. struct devlink_dpipe_dump_ctx dump_ctx;
  3295. struct nlmsghdr *nlh;
  3296. int err;
  3297. dump_ctx.skb = NULL;
  3298. dump_ctx.cmd = cmd;
  3299. dump_ctx.info = info;
  3300. err = table->table_ops->entries_dump(table->priv,
  3301. table->counters_enabled,
  3302. &dump_ctx);
  3303. if (err)
  3304. return err;
  3305. send_done:
  3306. nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
  3307. NLMSG_DONE, 0, flags | NLM_F_MULTI);
  3308. if (!nlh) {
  3309. err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
  3310. if (err)
  3311. return err;
  3312. goto send_done;
  3313. }
  3314. return genlmsg_reply(dump_ctx.skb, info);
  3315. }
  3316. static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
  3317. struct genl_info *info)
  3318. {
  3319. struct devlink *devlink = info->user_ptr[0];
  3320. struct devlink_dpipe_table *table;
  3321. const char *table_name;
  3322. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME))
  3323. return -EINVAL;
  3324. table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
  3325. table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
  3326. table_name, devlink);
  3327. if (!table)
  3328. return -EINVAL;
  3329. if (!table->table_ops->entries_dump)
  3330. return -EINVAL;
  3331. return devlink_dpipe_entries_fill(info, DEVLINK_CMD_DPIPE_ENTRIES_GET,
  3332. 0, table);
  3333. }
  3334. static int devlink_dpipe_fields_put(struct sk_buff *skb,
  3335. const struct devlink_dpipe_header *header)
  3336. {
  3337. struct devlink_dpipe_field *field;
  3338. struct nlattr *field_attr;
  3339. int i;
  3340. for (i = 0; i < header->fields_count; i++) {
  3341. field = &header->fields[i];
  3342. field_attr = nla_nest_start_noflag(skb,
  3343. DEVLINK_ATTR_DPIPE_FIELD);
  3344. if (!field_attr)
  3345. return -EMSGSIZE;
  3346. if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
  3347. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
  3348. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) ||
  3349. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type))
  3350. goto nla_put_failure;
  3351. nla_nest_end(skb, field_attr);
  3352. }
  3353. return 0;
  3354. nla_put_failure:
  3355. nla_nest_cancel(skb, field_attr);
  3356. return -EMSGSIZE;
  3357. }
  3358. static int devlink_dpipe_header_put(struct sk_buff *skb,
  3359. struct devlink_dpipe_header *header)
  3360. {
  3361. struct nlattr *fields_attr, *header_attr;
  3362. int err;
  3363. header_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADER);
  3364. if (!header_attr)
  3365. return -EMSGSIZE;
  3366. if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) ||
  3367. nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
  3368. nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
  3369. goto nla_put_failure;
  3370. fields_attr = nla_nest_start_noflag(skb,
  3371. DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
  3372. if (!fields_attr)
  3373. goto nla_put_failure;
  3374. err = devlink_dpipe_fields_put(skb, header);
  3375. if (err) {
  3376. nla_nest_cancel(skb, fields_attr);
  3377. goto nla_put_failure;
  3378. }
  3379. nla_nest_end(skb, fields_attr);
  3380. nla_nest_end(skb, header_attr);
  3381. return 0;
  3382. nla_put_failure:
  3383. err = -EMSGSIZE;
  3384. nla_nest_cancel(skb, header_attr);
  3385. return err;
  3386. }
  3387. static int devlink_dpipe_headers_fill(struct genl_info *info,
  3388. enum devlink_command cmd, int flags,
  3389. struct devlink_dpipe_headers *
  3390. dpipe_headers)
  3391. {
  3392. struct devlink *devlink = info->user_ptr[0];
  3393. struct nlattr *headers_attr;
  3394. struct sk_buff *skb = NULL;
  3395. struct nlmsghdr *nlh;
  3396. void *hdr;
  3397. int i, j;
  3398. int err;
  3399. i = 0;
  3400. start_again:
  3401. err = devlink_dpipe_send_and_alloc_skb(&skb, info);
  3402. if (err)
  3403. return err;
  3404. hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
  3405. &devlink_nl_family, NLM_F_MULTI, cmd);
  3406. if (!hdr) {
  3407. nlmsg_free(skb);
  3408. return -EMSGSIZE;
  3409. }
  3410. if (devlink_nl_put_handle(skb, devlink))
  3411. goto nla_put_failure;
  3412. headers_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADERS);
  3413. if (!headers_attr)
  3414. goto nla_put_failure;
  3415. j = 0;
  3416. for (; i < dpipe_headers->headers_count; i++) {
  3417. err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]);
  3418. if (err) {
  3419. if (!j)
  3420. goto err_table_put;
  3421. break;
  3422. }
  3423. j++;
  3424. }
  3425. nla_nest_end(skb, headers_attr);
  3426. genlmsg_end(skb, hdr);
  3427. if (i != dpipe_headers->headers_count)
  3428. goto start_again;
  3429. send_done:
  3430. nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
  3431. NLMSG_DONE, 0, flags | NLM_F_MULTI);
  3432. if (!nlh) {
  3433. err = devlink_dpipe_send_and_alloc_skb(&skb, info);
  3434. if (err)
  3435. return err;
  3436. goto send_done;
  3437. }
  3438. return genlmsg_reply(skb, info);
  3439. nla_put_failure:
  3440. err = -EMSGSIZE;
  3441. err_table_put:
  3442. nlmsg_free(skb);
  3443. return err;
  3444. }
  3445. static int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
  3446. struct genl_info *info)
  3447. {
  3448. struct devlink *devlink = info->user_ptr[0];
  3449. if (!devlink->dpipe_headers)
  3450. return -EOPNOTSUPP;
  3451. return devlink_dpipe_headers_fill(info, DEVLINK_CMD_DPIPE_HEADERS_GET,
  3452. 0, devlink->dpipe_headers);
  3453. }
  3454. static int devlink_dpipe_table_counters_set(struct devlink *devlink,
  3455. const char *table_name,
  3456. bool enable)
  3457. {
  3458. struct devlink_dpipe_table *table;
  3459. table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
  3460. table_name, devlink);
  3461. if (!table)
  3462. return -EINVAL;
  3463. if (table->counter_control_extern)
  3464. return -EOPNOTSUPP;
  3465. if (!(table->counters_enabled ^ enable))
  3466. return 0;
  3467. table->counters_enabled = enable;
  3468. if (table->table_ops->counters_set_update)
  3469. table->table_ops->counters_set_update(table->priv, enable);
  3470. return 0;
  3471. }
  3472. static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
  3473. struct genl_info *info)
  3474. {
  3475. struct devlink *devlink = info->user_ptr[0];
  3476. const char *table_name;
  3477. bool counters_enable;
  3478. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME) ||
  3479. GENL_REQ_ATTR_CHECK(info,
  3480. DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED))
  3481. return -EINVAL;
  3482. table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
  3483. counters_enable = !!nla_get_u8(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]);
  3484. return devlink_dpipe_table_counters_set(devlink, table_name,
  3485. counters_enable);
  3486. }
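/* For reference, the request handled above is normally generated by the
 * devlink userspace tool; the handle and table name below are only examples:
 *
 *      devlink dpipe table set pci/0000:01:00.0 name mlxsw_erif counters enable
 */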
  3487. static struct devlink_resource *
  3488. devlink_resource_find(struct devlink *devlink,
  3489. struct devlink_resource *resource, u64 resource_id)
  3490. {
  3491. struct list_head *resource_list;
  3492. if (resource)
  3493. resource_list = &resource->resource_list;
  3494. else
  3495. resource_list = &devlink->resource_list;
  3496. list_for_each_entry(resource, resource_list, list) {
  3497. struct devlink_resource *child_resource;
  3498. if (resource->id == resource_id)
  3499. return resource;
  3500. child_resource = devlink_resource_find(devlink, resource,
  3501. resource_id);
  3502. if (child_resource)
  3503. return child_resource;
  3504. }
  3505. return NULL;
  3506. }
  3507. static void
  3508. devlink_resource_validate_children(struct devlink_resource *resource)
  3509. {
  3510. struct devlink_resource *child_resource;
  3511. bool size_valid = true;
  3512. u64 parts_size = 0;
  3513. if (list_empty(&resource->resource_list))
  3514. goto out;
  3515. list_for_each_entry(child_resource, &resource->resource_list, list)
  3516. parts_size += child_resource->size_new;
  3517. if (parts_size > resource->size_new)
  3518. size_valid = false;
  3519. out:
  3520. resource->size_valid = size_valid;
  3521. }
static int
devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
                               struct netlink_ext_ack *extack)
{
        u64 remainder;
        int err = 0;

        if (size > resource->size_params.size_max) {
                NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum");
                err = -EINVAL;
        }
        if (size < resource->size_params.size_min) {
                NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum");
                err = -EINVAL;
        }
        div64_u64_rem(size, resource->size_params.size_granularity, &remainder);
        if (remainder) {
                NL_SET_ERR_MSG_MOD(extack, "Wrong granularity");
                err = -EINVAL;
        }
        return err;
}
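/* Worked example for the checks above, with made-up size parameters
 * size_min = 128, size_max = 1048576 and size_granularity = 128: a requested
 * size of 2000 passes the min/max checks but fails the granularity check
 * (2000 % 128 == 80), while 1920 (15 * 128) is accepted.
 */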
  3543. static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
  3544. struct genl_info *info)
  3545. {
  3546. struct devlink *devlink = info->user_ptr[0];
  3547. struct devlink_resource *resource;
  3548. u64 resource_id;
  3549. u64 size;
  3550. int err;
  3551. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_ID) ||
  3552. GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_SIZE))
  3553. return -EINVAL;
  3554. resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
  3555. resource = devlink_resource_find(devlink, NULL, resource_id);
  3556. if (!resource)
  3557. return -EINVAL;
  3558. size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
  3559. err = devlink_resource_validate_size(resource, size, info->extack);
  3560. if (err)
  3561. return err;
  3562. resource->size_new = size;
  3563. devlink_resource_validate_children(resource);
  3564. if (resource->parent)
  3565. devlink_resource_validate_children(resource->parent);
  3566. return 0;
  3567. }
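/* Note that the handler above only records size_new and re-validates the
 * resource tree; the driver applies the new size on the next reload. An
 * illustrative userspace sequence (the resource path is mlxsw-specific):
 *
 *      devlink resource set pci/0000:01:00.0 path /kvd/linear size 98304
 *      devlink dev reload pci/0000:01:00.0
 */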
  3568. static int
  3569. devlink_resource_size_params_put(struct devlink_resource *resource,
  3570. struct sk_buff *skb)
  3571. {
  3572. struct devlink_resource_size_params *size_params;
  3573. size_params = &resource->size_params;
  3574. if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
  3575. size_params->size_granularity, DEVLINK_ATTR_PAD) ||
  3576. nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
  3577. size_params->size_max, DEVLINK_ATTR_PAD) ||
  3578. nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
  3579. size_params->size_min, DEVLINK_ATTR_PAD) ||
  3580. nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
  3581. return -EMSGSIZE;
  3582. return 0;
  3583. }
  3584. static int devlink_resource_occ_put(struct devlink_resource *resource,
  3585. struct sk_buff *skb)
  3586. {
  3587. if (!resource->occ_get)
  3588. return 0;
  3589. return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
  3590. resource->occ_get(resource->occ_get_priv),
  3591. DEVLINK_ATTR_PAD);
  3592. }
  3593. static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
  3594. struct devlink_resource *resource)
  3595. {
  3596. struct devlink_resource *child_resource;
  3597. struct nlattr *child_resource_attr;
  3598. struct nlattr *resource_attr;
  3599. resource_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_RESOURCE);
  3600. if (!resource_attr)
  3601. return -EMSGSIZE;
  3602. if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
  3603. nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
  3604. DEVLINK_ATTR_PAD) ||
  3605. nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
  3606. DEVLINK_ATTR_PAD))
  3607. goto nla_put_failure;
  3608. if (resource->size != resource->size_new)
  3609. nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
  3610. resource->size_new, DEVLINK_ATTR_PAD);
  3611. if (devlink_resource_occ_put(resource, skb))
  3612. goto nla_put_failure;
  3613. if (devlink_resource_size_params_put(resource, skb))
  3614. goto nla_put_failure;
  3615. if (list_empty(&resource->resource_list))
  3616. goto out;
  3617. if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
  3618. resource->size_valid))
  3619. goto nla_put_failure;
  3620. child_resource_attr = nla_nest_start_noflag(skb,
  3621. DEVLINK_ATTR_RESOURCE_LIST);
  3622. if (!child_resource_attr)
  3623. goto nla_put_failure;
  3624. list_for_each_entry(child_resource, &resource->resource_list, list) {
  3625. if (devlink_resource_put(devlink, skb, child_resource))
  3626. goto resource_put_failure;
  3627. }
  3628. nla_nest_end(skb, child_resource_attr);
  3629. out:
  3630. nla_nest_end(skb, resource_attr);
  3631. return 0;
  3632. resource_put_failure:
  3633. nla_nest_cancel(skb, child_resource_attr);
  3634. nla_put_failure:
  3635. nla_nest_cancel(skb, resource_attr);
  3636. return -EMSGSIZE;
  3637. }
  3638. static int devlink_resource_fill(struct genl_info *info,
  3639. enum devlink_command cmd, int flags)
  3640. {
  3641. struct devlink *devlink = info->user_ptr[0];
  3642. struct devlink_resource *resource;
  3643. struct nlattr *resources_attr;
  3644. struct sk_buff *skb = NULL;
  3645. struct nlmsghdr *nlh;
  3646. bool incomplete;
  3647. void *hdr;
  3648. int i;
  3649. int err;
  3650. resource = list_first_entry(&devlink->resource_list,
  3651. struct devlink_resource, list);
  3652. start_again:
  3653. err = devlink_dpipe_send_and_alloc_skb(&skb, info);
  3654. if (err)
  3655. return err;
  3656. hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
  3657. &devlink_nl_family, NLM_F_MULTI, cmd);
  3658. if (!hdr) {
  3659. nlmsg_free(skb);
  3660. return -EMSGSIZE;
  3661. }
  3662. if (devlink_nl_put_handle(skb, devlink))
  3663. goto nla_put_failure;
  3664. resources_attr = nla_nest_start_noflag(skb,
  3665. DEVLINK_ATTR_RESOURCE_LIST);
  3666. if (!resources_attr)
  3667. goto nla_put_failure;
  3668. incomplete = false;
  3669. i = 0;
  3670. list_for_each_entry_from(resource, &devlink->resource_list, list) {
  3671. err = devlink_resource_put(devlink, skb, resource);
  3672. if (err) {
  3673. if (!i)
  3674. goto err_resource_put;
  3675. incomplete = true;
  3676. break;
  3677. }
  3678. i++;
  3679. }
  3680. nla_nest_end(skb, resources_attr);
  3681. genlmsg_end(skb, hdr);
  3682. if (incomplete)
  3683. goto start_again;
  3684. send_done:
  3685. nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
  3686. NLMSG_DONE, 0, flags | NLM_F_MULTI);
  3687. if (!nlh) {
  3688. err = devlink_dpipe_send_and_alloc_skb(&skb, info);
  3689. if (err)
  3690. return err;
  3691. goto send_done;
  3692. }
  3693. return genlmsg_reply(skb, info);
  3694. nla_put_failure:
  3695. err = -EMSGSIZE;
  3696. err_resource_put:
  3697. nlmsg_free(skb);
  3698. return err;
  3699. }
  3700. static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
  3701. struct genl_info *info)
  3702. {
  3703. struct devlink *devlink = info->user_ptr[0];
  3704. if (list_empty(&devlink->resource_list))
  3705. return -EOPNOTSUPP;
  3706. return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
  3707. }
  3708. static int
  3709. devlink_resources_validate(struct devlink *devlink,
  3710. struct devlink_resource *resource,
  3711. struct genl_info *info)
  3712. {
  3713. struct list_head *resource_list;
  3714. int err = 0;
  3715. if (resource)
  3716. resource_list = &resource->resource_list;
  3717. else
  3718. resource_list = &devlink->resource_list;
  3719. list_for_each_entry(resource, resource_list, list) {
  3720. if (!resource->size_valid)
  3721. return -EINVAL;
  3722. err = devlink_resources_validate(devlink, resource, info);
  3723. if (err)
  3724. return err;
  3725. }
  3726. return err;
  3727. }
  3728. static struct net *devlink_netns_get(struct sk_buff *skb,
  3729. struct genl_info *info)
  3730. {
  3731. struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID];
  3732. struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD];
  3733. struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID];
  3734. struct net *net;
  3735. if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
  3736. NL_SET_ERR_MSG_MOD(info->extack, "multiple netns identifying attributes specified");
  3737. return ERR_PTR(-EINVAL);
  3738. }
  3739. if (netns_pid_attr) {
  3740. net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr));
  3741. } else if (netns_fd_attr) {
  3742. net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr));
  3743. } else if (netns_id_attr) {
  3744. net = get_net_ns_by_id(sock_net(skb->sk),
  3745. nla_get_u32(netns_id_attr));
  3746. if (!net)
  3747. net = ERR_PTR(-EINVAL);
  3748. } else {
  3749. WARN_ON(1);
  3750. net = ERR_PTR(-EINVAL);
  3751. }
  3752. if (IS_ERR(net)) {
  3753. NL_SET_ERR_MSG_MOD(info->extack, "Unknown network namespace");
  3754. return ERR_PTR(-EINVAL);
  3755. }
  3756. if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
  3757. put_net(net);
  3758. return ERR_PTR(-EPERM);
  3759. }
  3760. return net;
  3761. }
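/* The three attributes accepted above are mutually exclusive ways of naming
 * the target namespace (by PID, by open fd or by nsid). From userspace this
 * typically looks like (illustrative):
 *
 *      ip netns add ns1
 *      devlink dev reload pci/0000:01:00.0 netns ns1
 */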
  3762. static void devlink_param_notify(struct devlink *devlink,
  3763. unsigned int port_index,
  3764. struct devlink_param_item *param_item,
  3765. enum devlink_command cmd);
  3766. static void devlink_ns_change_notify(struct devlink *devlink,
  3767. struct net *dest_net, struct net *curr_net,
  3768. bool new)
  3769. {
  3770. struct devlink_param_item *param_item;
  3771. enum devlink_command cmd;
        /* Userspace needs to be notified about devlink objects
         * being removed from the original network namespace and entering
         * the new one. The rest of the devlink objects are re-created
         * during the reload process, so their notifications are generated
         * separately.
         */
  3777. if (!dest_net || net_eq(dest_net, curr_net))
  3778. return;
  3779. if (new)
  3780. devlink_notify(devlink, DEVLINK_CMD_NEW);
  3781. cmd = new ? DEVLINK_CMD_PARAM_NEW : DEVLINK_CMD_PARAM_DEL;
  3782. list_for_each_entry(param_item, &devlink->param_list, list)
  3783. devlink_param_notify(devlink, 0, param_item, cmd);
  3784. if (!new)
  3785. devlink_notify(devlink, DEVLINK_CMD_DEL);
  3786. }
  3787. static bool devlink_reload_supported(const struct devlink_ops *ops)
  3788. {
  3789. return ops->reload_down && ops->reload_up;
  3790. }
  3791. static void devlink_reload_failed_set(struct devlink *devlink,
  3792. bool reload_failed)
  3793. {
  3794. if (devlink->reload_failed == reload_failed)
  3795. return;
  3796. devlink->reload_failed = reload_failed;
  3797. devlink_notify(devlink, DEVLINK_CMD_NEW);
  3798. }
  3799. bool devlink_is_reload_failed(const struct devlink *devlink)
  3800. {
  3801. return devlink->reload_failed;
  3802. }
  3803. EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
  3804. static void
  3805. __devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats,
  3806. enum devlink_reload_limit limit, u32 actions_performed)
  3807. {
  3808. unsigned long actions = actions_performed;
  3809. int stat_idx;
  3810. int action;
  3811. for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) {
  3812. stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
  3813. reload_stats[stat_idx]++;
  3814. }
  3815. devlink_notify(devlink, DEVLINK_CMD_NEW);
  3816. }
  3817. static void
  3818. devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit,
  3819. u32 actions_performed)
  3820. {
  3821. __devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit,
  3822. actions_performed);
  3823. }
/**
 * devlink_remote_reload_actions_performed - Update devlink on reload actions
 * performed which are not a direct result of a devlink reload call.
 *
 * This should be called by a driver after performing reload actions that were
 * not the result of a devlink reload call on this host. For example,
 * fw_activate may have been performed because a devlink reload on another
 * host triggered it. The motivation for this function is to keep the reload
 * statistics of this devlink instance accurate whether the actions resulted
 * from a direct devlink reload call or not.
 *
 * @devlink: devlink
 * @limit: reload limit
 * @actions_performed: bitmask of actions performed
 */
  3838. void devlink_remote_reload_actions_performed(struct devlink *devlink,
  3839. enum devlink_reload_limit limit,
  3840. u32 actions_performed)
  3841. {
  3842. if (WARN_ON(!actions_performed ||
  3843. actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
  3844. actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) ||
  3845. limit > DEVLINK_RELOAD_LIMIT_MAX))
  3846. return;
  3847. __devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit,
  3848. actions_performed);
  3849. }
  3850. EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed);
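/* Sketch of the intended caller: a driver that detects a firmware activation
 * initiated from another host of a multi-host device reports it here so the
 * remote reload statistics stay accurate. The event handler and struct names
 * are hypothetical:
 *
 *      static void my_drv_remote_fw_activate_event(struct my_drv *drv)
 *      {
 *              devlink_remote_reload_actions_performed(drv->devlink,
 *                              DEVLINK_RELOAD_LIMIT_UNSPEC,
 *                              BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
 *      }
 */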
  3851. static int devlink_reload(struct devlink *devlink, struct net *dest_net,
  3852. enum devlink_reload_action action, enum devlink_reload_limit limit,
  3853. u32 *actions_performed, struct netlink_ext_ack *extack)
  3854. {
  3855. u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
  3856. struct net *curr_net;
  3857. int err;
  3858. memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
  3859. sizeof(remote_reload_stats));
  3860. curr_net = devlink_net(devlink);
  3861. devlink_ns_change_notify(devlink, dest_net, curr_net, false);
  3862. err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
  3863. if (err)
  3864. return err;
  3865. if (dest_net && !net_eq(dest_net, curr_net))
  3866. write_pnet(&devlink->_net, dest_net);
  3867. err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
  3868. devlink_reload_failed_set(devlink, !!err);
  3869. if (err)
  3870. return err;
  3871. devlink_ns_change_notify(devlink, dest_net, curr_net, true);
  3872. WARN_ON(!(*actions_performed & BIT(action)));
  3873. /* Catch driver on updating the remote action within devlink reload */
  3874. WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
  3875. sizeof(remote_reload_stats)));
  3876. devlink_reload_stats_update(devlink, limit, *actions_performed);
  3877. return 0;
  3878. }
  3879. static int
  3880. devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed,
  3881. enum devlink_command cmd, struct genl_info *info)
  3882. {
  3883. struct sk_buff *msg;
  3884. void *hdr;
  3885. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  3886. if (!msg)
  3887. return -ENOMEM;
  3888. hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd);
  3889. if (!hdr)
  3890. goto free_msg;
  3891. if (devlink_nl_put_handle(msg, devlink))
  3892. goto nla_put_failure;
  3893. if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed,
  3894. actions_performed))
  3895. goto nla_put_failure;
  3896. genlmsg_end(msg, hdr);
  3897. return genlmsg_reply(msg, info);
  3898. nla_put_failure:
  3899. genlmsg_cancel(msg, hdr);
  3900. free_msg:
  3901. nlmsg_free(msg);
  3902. return -EMSGSIZE;
  3903. }
  3904. static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
  3905. {
  3906. struct devlink *devlink = info->user_ptr[0];
  3907. enum devlink_reload_action action;
  3908. enum devlink_reload_limit limit;
  3909. struct net *dest_net = NULL;
  3910. u32 actions_performed;
  3911. int err;
  3912. if (!(devlink->features & DEVLINK_F_RELOAD))
  3913. return -EOPNOTSUPP;
  3914. err = devlink_resources_validate(devlink, NULL, info);
  3915. if (err) {
  3916. NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
  3917. return err;
  3918. }
  3919. if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
  3920. action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
  3921. else
  3922. action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
  3923. if (!devlink_reload_action_is_supported(devlink, action)) {
  3924. NL_SET_ERR_MSG_MOD(info->extack,
  3925. "Requested reload action is not supported by the driver");
  3926. return -EOPNOTSUPP;
  3927. }
  3928. limit = DEVLINK_RELOAD_LIMIT_UNSPEC;
  3929. if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) {
  3930. struct nla_bitfield32 limits;
  3931. u32 limits_selected;
  3932. limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]);
  3933. limits_selected = limits.value & limits.selector;
  3934. if (!limits_selected) {
  3935. NL_SET_ERR_MSG_MOD(info->extack, "Invalid limit selected");
  3936. return -EINVAL;
  3937. }
  3938. for (limit = 0 ; limit <= DEVLINK_RELOAD_LIMIT_MAX ; limit++)
  3939. if (limits_selected & BIT(limit))
  3940. break;
                /* The UAPI allows selecting multiple limits, but
                 * multiselection is not currently used.
                 */
  3942. if (limits_selected != BIT(limit)) {
  3943. NL_SET_ERR_MSG_MOD(info->extack,
  3944. "Multiselection of limit is not supported");
  3945. return -EOPNOTSUPP;
  3946. }
  3947. if (!devlink_reload_limit_is_supported(devlink, limit)) {
  3948. NL_SET_ERR_MSG_MOD(info->extack,
  3949. "Requested limit is not supported by the driver");
  3950. return -EOPNOTSUPP;
  3951. }
  3952. if (devlink_reload_combination_is_invalid(action, limit)) {
  3953. NL_SET_ERR_MSG_MOD(info->extack,
  3954. "Requested limit is invalid for this action");
  3955. return -EINVAL;
  3956. }
  3957. }
  3958. if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
  3959. info->attrs[DEVLINK_ATTR_NETNS_FD] ||
  3960. info->attrs[DEVLINK_ATTR_NETNS_ID]) {
  3961. dest_net = devlink_netns_get(skb, info);
  3962. if (IS_ERR(dest_net))
  3963. return PTR_ERR(dest_net);
  3964. }
  3965. err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
  3966. if (dest_net)
  3967. put_net(dest_net);
  3968. if (err)
  3969. return err;
        /* For backward compatibility, generate a reply only if the user
         * passed the new attributes.
         */
  3971. if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS])
  3972. return 0;
  3973. return devlink_nl_reload_actions_performed_snd(devlink, actions_performed,
  3974. DEVLINK_CMD_RELOAD, info);
  3975. }
  3976. static int devlink_nl_flash_update_fill(struct sk_buff *msg,
  3977. struct devlink *devlink,
  3978. enum devlink_command cmd,
  3979. struct devlink_flash_notify *params)
  3980. {
  3981. void *hdr;
  3982. hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
  3983. if (!hdr)
  3984. return -EMSGSIZE;
  3985. if (devlink_nl_put_handle(msg, devlink))
  3986. goto nla_put_failure;
  3987. if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
  3988. goto out;
  3989. if (params->status_msg &&
  3990. nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
  3991. params->status_msg))
  3992. goto nla_put_failure;
  3993. if (params->component &&
  3994. nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
  3995. params->component))
  3996. goto nla_put_failure;
  3997. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
  3998. params->done, DEVLINK_ATTR_PAD))
  3999. goto nla_put_failure;
  4000. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
  4001. params->total, DEVLINK_ATTR_PAD))
  4002. goto nla_put_failure;
  4003. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
  4004. params->timeout, DEVLINK_ATTR_PAD))
  4005. goto nla_put_failure;
  4006. out:
  4007. genlmsg_end(msg, hdr);
  4008. return 0;
  4009. nla_put_failure:
  4010. genlmsg_cancel(msg, hdr);
  4011. return -EMSGSIZE;
  4012. }
  4013. static void __devlink_flash_update_notify(struct devlink *devlink,
  4014. enum devlink_command cmd,
  4015. struct devlink_flash_notify *params)
  4016. {
  4017. struct sk_buff *msg;
  4018. int err;
  4019. WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE &&
  4020. cmd != DEVLINK_CMD_FLASH_UPDATE_END &&
  4021. cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS);
  4022. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  4023. return;
  4024. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  4025. if (!msg)
  4026. return;
  4027. err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
  4028. if (err)
  4029. goto out_free_msg;
  4030. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
  4031. msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  4032. return;
  4033. out_free_msg:
  4034. nlmsg_free(msg);
  4035. }
  4036. static void devlink_flash_update_begin_notify(struct devlink *devlink)
  4037. {
  4038. struct devlink_flash_notify params = {};
  4039. __devlink_flash_update_notify(devlink,
  4040. DEVLINK_CMD_FLASH_UPDATE,
  4041. &params);
  4042. }
  4043. static void devlink_flash_update_end_notify(struct devlink *devlink)
  4044. {
  4045. struct devlink_flash_notify params = {};
  4046. __devlink_flash_update_notify(devlink,
  4047. DEVLINK_CMD_FLASH_UPDATE_END,
  4048. &params);
  4049. }
  4050. void devlink_flash_update_status_notify(struct devlink *devlink,
  4051. const char *status_msg,
  4052. const char *component,
  4053. unsigned long done,
  4054. unsigned long total)
  4055. {
  4056. struct devlink_flash_notify params = {
  4057. .status_msg = status_msg,
  4058. .component = component,
  4059. .done = done,
  4060. .total = total,
  4061. };
  4062. __devlink_flash_update_notify(devlink,
  4063. DEVLINK_CMD_FLASH_UPDATE_STATUS,
  4064. &params);
  4065. }
  4066. EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
  4067. void devlink_flash_update_timeout_notify(struct devlink *devlink,
  4068. const char *status_msg,
  4069. const char *component,
  4070. unsigned long timeout)
  4071. {
  4072. struct devlink_flash_notify params = {
  4073. .status_msg = status_msg,
  4074. .component = component,
  4075. .timeout = timeout,
  4076. };
  4077. __devlink_flash_update_notify(devlink,
  4078. DEVLINK_CMD_FLASH_UPDATE_STATUS,
  4079. &params);
  4080. }
  4081. EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
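/* Sketch of how a driver's ->flash_update() op typically reports progress
 * through the notifiers above; the status strings and chunking are
 * illustrative only:
 *
 *      devlink_flash_update_status_notify(devlink, "Downloading", NULL, 0, total);
 *      for (done = 0; done < total; done += chunk) {
 *              // write the next chunk to the device
 *              devlink_flash_update_status_notify(devlink, "Downloading", NULL,
 *                                                 done, total);
 *      }
 *      devlink_flash_update_status_notify(devlink, "Activating", NULL, 0, 0);
 */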
  4082. struct devlink_info_req {
  4083. struct sk_buff *msg;
  4084. void (*version_cb)(const char *version_name,
  4085. enum devlink_info_version_type version_type,
  4086. void *version_cb_priv);
  4087. void *version_cb_priv;
  4088. };
  4089. struct devlink_flash_component_lookup_ctx {
  4090. const char *lookup_name;
  4091. bool lookup_name_found;
  4092. };
  4093. static void
  4094. devlink_flash_component_lookup_cb(const char *version_name,
  4095. enum devlink_info_version_type version_type,
  4096. void *version_cb_priv)
  4097. {
  4098. struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv;
  4099. if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT ||
  4100. lookup_ctx->lookup_name_found)
  4101. return;
  4102. lookup_ctx->lookup_name_found =
  4103. !strcmp(lookup_ctx->lookup_name, version_name);
  4104. }
  4105. static int devlink_flash_component_get(struct devlink *devlink,
  4106. struct nlattr *nla_component,
  4107. const char **p_component,
  4108. struct netlink_ext_ack *extack)
  4109. {
  4110. struct devlink_flash_component_lookup_ctx lookup_ctx = {};
  4111. struct devlink_info_req req = {};
  4112. const char *component;
  4113. int ret;
  4114. if (!nla_component)
  4115. return 0;
  4116. component = nla_data(nla_component);
  4117. if (!devlink->ops->info_get) {
  4118. NL_SET_ERR_MSG_ATTR(extack, nla_component,
  4119. "component update is not supported by this device");
  4120. return -EOPNOTSUPP;
  4121. }
  4122. lookup_ctx.lookup_name = component;
  4123. req.version_cb = devlink_flash_component_lookup_cb;
  4124. req.version_cb_priv = &lookup_ctx;
  4125. ret = devlink->ops->info_get(devlink, &req, NULL);
  4126. if (ret)
  4127. return ret;
  4128. if (!lookup_ctx.lookup_name_found) {
  4129. NL_SET_ERR_MSG_ATTR(extack, nla_component,
  4130. "selected component is not supported by this device");
  4131. return -EINVAL;
  4132. }
  4133. *p_component = component;
  4134. return 0;
  4135. }
  4136. static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
  4137. struct genl_info *info)
  4138. {
  4139. struct nlattr *nla_overwrite_mask, *nla_file_name;
  4140. struct devlink_flash_update_params params = {};
  4141. struct devlink *devlink = info->user_ptr[0];
  4142. const char *file_name;
  4143. u32 supported_params;
  4144. int ret;
  4145. if (!devlink->ops->flash_update)
  4146. return -EOPNOTSUPP;
  4147. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME))
  4148. return -EINVAL;
  4149. ret = devlink_flash_component_get(devlink,
  4150. info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT],
  4151. &params.component, info->extack);
  4152. if (ret)
  4153. return ret;
  4154. supported_params = devlink->ops->supported_flash_update_params;
  4155. nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK];
  4156. if (nla_overwrite_mask) {
  4157. struct nla_bitfield32 sections;
  4158. if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) {
  4159. NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask,
  4160. "overwrite settings are not supported by this device");
  4161. return -EOPNOTSUPP;
  4162. }
  4163. sections = nla_get_bitfield32(nla_overwrite_mask);
  4164. params.overwrite_mask = sections.value & sections.selector;
  4165. }
  4166. nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
  4167. file_name = nla_data(nla_file_name);
  4168. ret = request_firmware(&params.fw, file_name, devlink->dev);
  4169. if (ret) {
  4170. NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file");
  4171. return ret;
  4172. }
  4173. devlink_flash_update_begin_notify(devlink);
  4174. ret = devlink->ops->flash_update(devlink, &params, info->extack);
  4175. devlink_flash_update_end_notify(devlink);
  4176. release_firmware(params.fw);
  4177. return ret;
  4178. }
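/* Illustrative userspace triggers for the handler above; component and
 * overwrite are only accepted when the driver advertises support for them
 * via supported_flash_update_params:
 *
 *      devlink dev flash pci/0000:01:00.0 file fw.bin
 *      devlink dev flash pci/0000:01:00.0 file fw.bin component fw.mgmt overwrite settings
 */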
  4179. static int
  4180. devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink,
  4181. u32 portid, u32 seq, int flags,
  4182. struct netlink_ext_ack *extack)
  4183. {
  4184. struct nlattr *selftests;
  4185. void *hdr;
  4186. int err;
  4187. int i;
  4188. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags,
  4189. DEVLINK_CMD_SELFTESTS_GET);
  4190. if (!hdr)
  4191. return -EMSGSIZE;
  4192. err = -EMSGSIZE;
  4193. if (devlink_nl_put_handle(msg, devlink))
  4194. goto err_cancel_msg;
  4195. selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
  4196. if (!selftests)
  4197. goto err_cancel_msg;
  4198. for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
  4199. i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
  4200. if (devlink->ops->selftest_check(devlink, i, extack)) {
  4201. err = nla_put_flag(msg, i);
  4202. if (err)
  4203. goto err_cancel_msg;
  4204. }
  4205. }
  4206. nla_nest_end(msg, selftests);
  4207. genlmsg_end(msg, hdr);
  4208. return 0;
  4209. err_cancel_msg:
  4210. genlmsg_cancel(msg, hdr);
  4211. return err;
  4212. }
  4213. static int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
  4214. struct genl_info *info)
  4215. {
  4216. struct devlink *devlink = info->user_ptr[0];
  4217. struct sk_buff *msg;
  4218. int err;
  4219. if (!devlink->ops->selftest_check)
  4220. return -EOPNOTSUPP;
  4221. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  4222. if (!msg)
  4223. return -ENOMEM;
  4224. err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid,
  4225. info->snd_seq, 0, info->extack);
  4226. if (err) {
  4227. nlmsg_free(msg);
  4228. return err;
  4229. }
  4230. return genlmsg_reply(msg, info);
  4231. }
  4232. static int devlink_nl_cmd_selftests_get_dumpit(struct sk_buff *msg,
  4233. struct netlink_callback *cb)
  4234. {
  4235. struct devlink *devlink;
  4236. int start = cb->args[0];
  4237. unsigned long index;
  4238. int idx = 0;
  4239. int err = 0;
  4240. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  4241. if (idx < start || !devlink->ops->selftest_check)
  4242. goto inc;
  4243. devl_lock(devlink);
  4244. err = devlink_nl_selftests_fill(msg, devlink,
  4245. NETLINK_CB(cb->skb).portid,
  4246. cb->nlh->nlmsg_seq, NLM_F_MULTI,
  4247. cb->extack);
  4248. devl_unlock(devlink);
  4249. if (err) {
  4250. devlink_put(devlink);
  4251. break;
  4252. }
  4253. inc:
  4254. idx++;
  4255. devlink_put(devlink);
  4256. }
  4257. if (err != -EMSGSIZE)
  4258. return err;
  4259. cb->args[0] = idx;
  4260. return msg->len;
  4261. }
  4262. static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
  4263. enum devlink_selftest_status test_status)
  4264. {
  4265. struct nlattr *result_attr;
  4266. result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT);
  4267. if (!result_attr)
  4268. return -EMSGSIZE;
  4269. if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) ||
  4270. nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS,
  4271. test_status))
  4272. goto nla_put_failure;
  4273. nla_nest_end(skb, result_attr);
  4274. return 0;
  4275. nla_put_failure:
  4276. nla_nest_cancel(skb, result_attr);
  4277. return -EMSGSIZE;
  4278. }
  4279. static int devlink_nl_cmd_selftests_run(struct sk_buff *skb,
  4280. struct genl_info *info)
  4281. {
  4282. struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
  4283. struct devlink *devlink = info->user_ptr[0];
  4284. struct nlattr *attrs, *selftests;
  4285. struct sk_buff *msg;
  4286. void *hdr;
  4287. int err;
  4288. int i;
  4289. if (!devlink->ops->selftest_run || !devlink->ops->selftest_check)
  4290. return -EOPNOTSUPP;
  4291. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS))
  4292. return -EINVAL;
  4293. attrs = info->attrs[DEVLINK_ATTR_SELFTESTS];
  4294. err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs,
  4295. devlink_selftest_nl_policy, info->extack);
  4296. if (err < 0)
  4297. return err;
  4298. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  4299. if (!msg)
  4300. return -ENOMEM;
  4301. err = -EMSGSIZE;
  4302. hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
  4303. &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN);
  4304. if (!hdr)
  4305. goto free_msg;
  4306. if (devlink_nl_put_handle(msg, devlink))
  4307. goto genlmsg_cancel;
  4308. selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS);
  4309. if (!selftests)
  4310. goto genlmsg_cancel;
  4311. for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1;
  4312. i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) {
  4313. enum devlink_selftest_status test_status;
  4314. if (nla_get_flag(tb[i])) {
  4315. if (!devlink->ops->selftest_check(devlink, i,
  4316. info->extack)) {
  4317. if (devlink_selftest_result_put(msg, i,
  4318. DEVLINK_SELFTEST_STATUS_SKIP))
  4319. goto selftests_nest_cancel;
  4320. continue;
  4321. }
  4322. test_status = devlink->ops->selftest_run(devlink, i,
  4323. info->extack);
  4324. if (devlink_selftest_result_put(msg, i, test_status))
  4325. goto selftests_nest_cancel;
  4326. }
  4327. }
  4328. nla_nest_end(msg, selftests);
  4329. genlmsg_end(msg, hdr);
  4330. return genlmsg_reply(msg, info);
  4331. selftests_nest_cancel:
  4332. nla_nest_cancel(msg, selftests);
  4333. genlmsg_cancel:
  4334. genlmsg_cancel(msg, hdr);
  4335. free_msg:
  4336. nlmsg_free(msg);
  4337. return err;
  4338. }
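/* The two selftest handlers above rely on a pair of driver ops. A minimal,
 * hypothetical implementation that only claims support for the flash selftest
 * could look like:
 *
 *      static bool my_selftest_check(struct devlink *devlink, unsigned int id,
 *                                    struct netlink_ext_ack *extack)
 *      {
 *              return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
 *      }
 *
 *      static enum devlink_selftest_status
 *      my_selftest_run(struct devlink *devlink, unsigned int id,
 *                      struct netlink_ext_ack *extack)
 *      {
 *              return DEVLINK_SELFTEST_STATUS_PASS;
 *      }
 */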
  4339. static const struct devlink_param devlink_param_generic[] = {
  4340. {
  4341. .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
  4342. .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME,
  4343. .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE,
  4344. },
  4345. {
  4346. .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
  4347. .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME,
  4348. .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE,
  4349. },
  4350. {
  4351. .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
  4352. .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME,
  4353. .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE,
  4354. },
  4355. {
  4356. .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
  4357. .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
  4358. .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
  4359. },
  4360. {
  4361. .id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
  4362. .name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME,
  4363. .type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE,
  4364. },
  4365. {
  4366. .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
  4367. .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME,
  4368. .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE,
  4369. },
  4370. {
  4371. .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
  4372. .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
  4373. .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
  4374. },
  4375. {
  4376. .id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
  4377. .name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME,
  4378. .type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE,
  4379. },
  4380. {
  4381. .id = DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
  4382. .name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
  4383. .type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
  4384. },
  4385. {
  4386. .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
  4387. .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
  4388. .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
  4389. },
  4390. {
  4391. .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
  4392. .name = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME,
  4393. .type = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE,
  4394. },
  4395. {
  4396. .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
  4397. .name = DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME,
  4398. .type = DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE,
  4399. },
  4400. {
  4401. .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
  4402. .name = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME,
  4403. .type = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE,
  4404. },
  4405. {
  4406. .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
  4407. .name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME,
  4408. .type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE,
  4409. },
  4410. {
  4411. .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
  4412. .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
  4413. .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
  4414. },
  4415. {
  4416. .id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
  4417. .name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME,
  4418. .type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE,
  4419. },
  4420. {
  4421. .id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
  4422. .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
  4423. .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
  4424. },
  4425. };
  4426. static int devlink_param_generic_verify(const struct devlink_param *param)
  4427. {
        /* verify that it matches a generic parameter by id and name */
  4429. if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX)
  4430. return -EINVAL;
  4431. if (strcmp(param->name, devlink_param_generic[param->id].name))
  4432. return -ENOENT;
  4433. WARN_ON(param->type != devlink_param_generic[param->id].type);
  4434. return 0;
  4435. }
  4436. static int devlink_param_driver_verify(const struct devlink_param *param)
  4437. {
  4438. int i;
  4439. if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX)
  4440. return -EINVAL;
  4441. /* verify no such name in generic params */
  4442. for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++)
  4443. if (!strcmp(param->name, devlink_param_generic[i].name))
  4444. return -EEXIST;
  4445. return 0;
  4446. }
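/* For context, drivers describe their parameters with the DEVLINK_PARAM_*
 * helper macros from include/net/devlink.h; a generic parameter reuses the
 * IDs verified above, while a driver-specific one must pick an ID above
 * DEVLINK_PARAM_GENERIC_ID_MAX. The names and callbacks below are
 * hypothetical:
 *
 *      enum { MY_DEVLINK_PARAM_ID_FOO = DEVLINK_PARAM_GENERIC_ID_MAX + 1, };
 *
 *      static const struct devlink_param my_params[] = {
 *              DEVLINK_PARAM_GENERIC(MAX_MACS,
 *                                    BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
 *                                    NULL, NULL, NULL),
 *              DEVLINK_PARAM_DRIVER(MY_DEVLINK_PARAM_ID_FOO, "foo",
 *                                   DEVLINK_PARAM_TYPE_U32,
 *                                   BIT(DEVLINK_PARAM_CMODE_RUNTIME),
 *                                   my_foo_get, my_foo_set, NULL),
 *      };
 */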
  4447. static struct devlink_param_item *
  4448. devlink_param_find_by_name(struct list_head *param_list,
  4449. const char *param_name)
  4450. {
  4451. struct devlink_param_item *param_item;
  4452. list_for_each_entry(param_item, param_list, list)
  4453. if (!strcmp(param_item->param->name, param_name))
  4454. return param_item;
  4455. return NULL;
  4456. }
  4457. static struct devlink_param_item *
  4458. devlink_param_find_by_id(struct list_head *param_list, u32 param_id)
  4459. {
  4460. struct devlink_param_item *param_item;
  4461. list_for_each_entry(param_item, param_list, list)
  4462. if (param_item->param->id == param_id)
  4463. return param_item;
  4464. return NULL;
  4465. }
  4466. static bool
  4467. devlink_param_cmode_is_supported(const struct devlink_param *param,
  4468. enum devlink_param_cmode cmode)
  4469. {
  4470. return test_bit(cmode, &param->supported_cmodes);
  4471. }
  4472. static int devlink_param_get(struct devlink *devlink,
  4473. const struct devlink_param *param,
  4474. struct devlink_param_gset_ctx *ctx)
  4475. {
  4476. if (!param->get)
  4477. return -EOPNOTSUPP;
  4478. return param->get(devlink, param->id, ctx);
  4479. }
  4480. static int devlink_param_set(struct devlink *devlink,
  4481. const struct devlink_param *param,
  4482. struct devlink_param_gset_ctx *ctx)
  4483. {
  4484. if (!param->set)
  4485. return -EOPNOTSUPP;
  4486. return param->set(devlink, param->id, ctx);
  4487. }
  4488. static int
  4489. devlink_param_type_to_nla_type(enum devlink_param_type param_type)
  4490. {
  4491. switch (param_type) {
  4492. case DEVLINK_PARAM_TYPE_U8:
  4493. return NLA_U8;
  4494. case DEVLINK_PARAM_TYPE_U16:
  4495. return NLA_U16;
  4496. case DEVLINK_PARAM_TYPE_U32:
  4497. return NLA_U32;
  4498. case DEVLINK_PARAM_TYPE_STRING:
  4499. return NLA_STRING;
  4500. case DEVLINK_PARAM_TYPE_BOOL:
  4501. return NLA_FLAG;
  4502. default:
  4503. return -EINVAL;
  4504. }
  4505. }
  4506. static int
  4507. devlink_nl_param_value_fill_one(struct sk_buff *msg,
  4508. enum devlink_param_type type,
  4509. enum devlink_param_cmode cmode,
  4510. union devlink_param_value val)
  4511. {
  4512. struct nlattr *param_value_attr;
  4513. param_value_attr = nla_nest_start_noflag(msg,
  4514. DEVLINK_ATTR_PARAM_VALUE);
  4515. if (!param_value_attr)
  4516. goto nla_put_failure;
  4517. if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
  4518. goto value_nest_cancel;
  4519. switch (type) {
  4520. case DEVLINK_PARAM_TYPE_U8:
  4521. if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
  4522. goto value_nest_cancel;
  4523. break;
  4524. case DEVLINK_PARAM_TYPE_U16:
  4525. if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
  4526. goto value_nest_cancel;
  4527. break;
  4528. case DEVLINK_PARAM_TYPE_U32:
  4529. if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
  4530. goto value_nest_cancel;
  4531. break;
  4532. case DEVLINK_PARAM_TYPE_STRING:
  4533. if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
  4534. val.vstr))
  4535. goto value_nest_cancel;
  4536. break;
  4537. case DEVLINK_PARAM_TYPE_BOOL:
  4538. if (val.vbool &&
  4539. nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
  4540. goto value_nest_cancel;
  4541. break;
  4542. }
  4543. nla_nest_end(msg, param_value_attr);
  4544. return 0;
  4545. value_nest_cancel:
  4546. nla_nest_cancel(msg, param_value_attr);
  4547. nla_put_failure:
  4548. return -EMSGSIZE;
  4549. }
  4550. static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
  4551. unsigned int port_index,
  4552. struct devlink_param_item *param_item,
  4553. enum devlink_command cmd,
  4554. u32 portid, u32 seq, int flags)
  4555. {
  4556. union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
  4557. bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
  4558. const struct devlink_param *param = param_item->param;
  4559. struct devlink_param_gset_ctx ctx;
  4560. struct nlattr *param_values_list;
  4561. struct nlattr *param_attr;
  4562. int nla_type;
  4563. void *hdr;
  4564. int err;
  4565. int i;
        /* Gather a value for each supported configuration mode; the
         * driverinit value comes from the driver's cached copy, other
         * cmodes are queried via the parameter's get() callback.
         */
  4567. for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
  4568. if (!devlink_param_cmode_is_supported(param, i))
  4569. continue;
  4570. if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
  4571. if (!param_item->driverinit_value_valid)
  4572. return -EOPNOTSUPP;
  4573. param_value[i] = param_item->driverinit_value;
  4574. } else {
  4575. ctx.cmode = i;
  4576. err = devlink_param_get(devlink, param, &ctx);
  4577. if (err)
  4578. return err;
  4579. param_value[i] = ctx.val;
  4580. }
  4581. param_value_set[i] = true;
  4582. }
  4583. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  4584. if (!hdr)
  4585. return -EMSGSIZE;
  4586. if (devlink_nl_put_handle(msg, devlink))
  4587. goto genlmsg_cancel;
  4588. if (cmd == DEVLINK_CMD_PORT_PARAM_GET ||
  4589. cmd == DEVLINK_CMD_PORT_PARAM_NEW ||
  4590. cmd == DEVLINK_CMD_PORT_PARAM_DEL)
  4591. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
  4592. goto genlmsg_cancel;
  4593. param_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM);
  4594. if (!param_attr)
  4595. goto genlmsg_cancel;
  4596. if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
  4597. goto param_nest_cancel;
  4598. if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC))
  4599. goto param_nest_cancel;
  4600. nla_type = devlink_param_type_to_nla_type(param->type);
  4601. if (nla_type < 0)
  4602. goto param_nest_cancel;
  4603. if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
  4604. goto param_nest_cancel;
  4605. param_values_list = nla_nest_start_noflag(msg,
  4606. DEVLINK_ATTR_PARAM_VALUES_LIST);
  4607. if (!param_values_list)
  4608. goto param_nest_cancel;
  4609. for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
  4610. if (!param_value_set[i])
  4611. continue;
  4612. err = devlink_nl_param_value_fill_one(msg, param->type,
  4613. i, param_value[i]);
  4614. if (err)
  4615. goto values_list_nest_cancel;
  4616. }
  4617. nla_nest_end(msg, param_values_list);
  4618. nla_nest_end(msg, param_attr);
  4619. genlmsg_end(msg, hdr);
  4620. return 0;
  4621. values_list_nest_cancel:
  4622. nla_nest_end(msg, param_values_list);
  4623. param_nest_cancel:
  4624. nla_nest_cancel(msg, param_attr);
  4625. genlmsg_cancel:
  4626. genlmsg_cancel(msg, hdr);
  4627. return -EMSGSIZE;
  4628. }
  4629. static void devlink_param_notify(struct devlink *devlink,
  4630. unsigned int port_index,
  4631. struct devlink_param_item *param_item,
  4632. enum devlink_command cmd)
  4633. {
  4634. struct sk_buff *msg;
  4635. int err;
  4636. WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
  4637. cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
  4638. cmd != DEVLINK_CMD_PORT_PARAM_DEL);
  4639. ASSERT_DEVLINK_REGISTERED(devlink);
  4640. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  4641. if (!msg)
  4642. return;
  4643. err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
  4644. 0, 0, 0);
  4645. if (err) {
  4646. nlmsg_free(msg);
  4647. return;
  4648. }
  4649. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
  4650. msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  4651. }
  4652. static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
  4653. struct netlink_callback *cb)
  4654. {
  4655. struct devlink_param_item *param_item;
  4656. struct devlink *devlink;
  4657. int start = cb->args[0];
  4658. unsigned long index;
  4659. int idx = 0;
  4660. int err = 0;
  4661. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  4662. devl_lock(devlink);
  4663. list_for_each_entry(param_item, &devlink->param_list, list) {
  4664. if (idx < start) {
  4665. idx++;
  4666. continue;
  4667. }
  4668. err = devlink_nl_param_fill(msg, devlink, 0, param_item,
  4669. DEVLINK_CMD_PARAM_GET,
  4670. NETLINK_CB(cb->skb).portid,
  4671. cb->nlh->nlmsg_seq,
  4672. NLM_F_MULTI);
  4673. if (err == -EOPNOTSUPP) {
  4674. err = 0;
  4675. } else if (err) {
  4676. devl_unlock(devlink);
  4677. devlink_put(devlink);
  4678. goto out;
  4679. }
  4680. idx++;
  4681. }
  4682. devl_unlock(devlink);
  4683. devlink_put(devlink);
  4684. }
  4685. out:
  4686. if (err != -EMSGSIZE)
  4687. return err;
  4688. cb->args[0] = idx;
  4689. return msg->len;
  4690. }
  4691. static int
  4692. devlink_param_type_get_from_info(struct genl_info *info,
  4693. enum devlink_param_type *param_type)
  4694. {
  4695. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_TYPE))
  4696. return -EINVAL;
  4697. switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) {
  4698. case NLA_U8:
  4699. *param_type = DEVLINK_PARAM_TYPE_U8;
  4700. break;
  4701. case NLA_U16:
  4702. *param_type = DEVLINK_PARAM_TYPE_U16;
  4703. break;
  4704. case NLA_U32:
  4705. *param_type = DEVLINK_PARAM_TYPE_U32;
  4706. break;
  4707. case NLA_STRING:
  4708. *param_type = DEVLINK_PARAM_TYPE_STRING;
  4709. break;
  4710. case NLA_FLAG:
  4711. *param_type = DEVLINK_PARAM_TYPE_BOOL;
  4712. break;
  4713. default:
  4714. return -EINVAL;
  4715. }
  4716. return 0;
  4717. }
  4718. static int
  4719. devlink_param_value_get_from_info(const struct devlink_param *param,
  4720. struct genl_info *info,
  4721. union devlink_param_value *value)
  4722. {
  4723. struct nlattr *param_data;
  4724. int len;
  4725. param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
  4726. if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
  4727. return -EINVAL;
  4728. switch (param->type) {
  4729. case DEVLINK_PARAM_TYPE_U8:
  4730. if (nla_len(param_data) != sizeof(u8))
  4731. return -EINVAL;
  4732. value->vu8 = nla_get_u8(param_data);
  4733. break;
  4734. case DEVLINK_PARAM_TYPE_U16:
  4735. if (nla_len(param_data) != sizeof(u16))
  4736. return -EINVAL;
  4737. value->vu16 = nla_get_u16(param_data);
  4738. break;
  4739. case DEVLINK_PARAM_TYPE_U32:
  4740. if (nla_len(param_data) != sizeof(u32))
  4741. return -EINVAL;
  4742. value->vu32 = nla_get_u32(param_data);
  4743. break;
  4744. case DEVLINK_PARAM_TYPE_STRING:
  4745. len = strnlen(nla_data(param_data), nla_len(param_data));
  4746. if (len == nla_len(param_data) ||
  4747. len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
  4748. return -EINVAL;
  4749. strcpy(value->vstr, nla_data(param_data));
  4750. break;
  4751. case DEVLINK_PARAM_TYPE_BOOL:
  4752. if (param_data && nla_len(param_data))
  4753. return -EINVAL;
  4754. value->vbool = nla_get_flag(param_data);
  4755. break;
  4756. }
  4757. return 0;
  4758. }
  4759. static struct devlink_param_item *
  4760. devlink_param_get_from_info(struct list_head *param_list,
  4761. struct genl_info *info)
  4762. {
  4763. char *param_name;
  4764. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_NAME))
  4765. return NULL;
  4766. param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
  4767. return devlink_param_find_by_name(param_list, param_name);
  4768. }
  4769. static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
  4770. struct genl_info *info)
  4771. {
  4772. struct devlink *devlink = info->user_ptr[0];
  4773. struct devlink_param_item *param_item;
  4774. struct sk_buff *msg;
  4775. int err;
  4776. param_item = devlink_param_get_from_info(&devlink->param_list, info);
  4777. if (!param_item)
  4778. return -EINVAL;
  4779. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  4780. if (!msg)
  4781. return -ENOMEM;
  4782. err = devlink_nl_param_fill(msg, devlink, 0, param_item,
  4783. DEVLINK_CMD_PARAM_GET,
  4784. info->snd_portid, info->snd_seq, 0);
  4785. if (err) {
  4786. nlmsg_free(msg);
  4787. return err;
  4788. }
  4789. return genlmsg_reply(msg, info);
  4790. }
  4791. static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
  4792. unsigned int port_index,
  4793. struct list_head *param_list,
  4794. struct genl_info *info,
  4795. enum devlink_command cmd)
  4796. {
  4797. enum devlink_param_type param_type;
  4798. struct devlink_param_gset_ctx ctx;
  4799. enum devlink_param_cmode cmode;
  4800. struct devlink_param_item *param_item;
  4801. const struct devlink_param *param;
  4802. union devlink_param_value value;
  4803. int err = 0;
  4804. param_item = devlink_param_get_from_info(param_list, info);
  4805. if (!param_item)
  4806. return -EINVAL;
  4807. param = param_item->param;
  4808. err = devlink_param_type_get_from_info(info, &param_type);
  4809. if (err)
  4810. return err;
  4811. if (param_type != param->type)
  4812. return -EINVAL;
  4813. err = devlink_param_value_get_from_info(param, info, &value);
  4814. if (err)
  4815. return err;
  4816. if (param->validate) {
  4817. err = param->validate(devlink, param->id, value, info->extack);
  4818. if (err)
  4819. return err;
  4820. }
  4821. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE))
  4822. return -EINVAL;
  4823. cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]);
  4824. if (!devlink_param_cmode_is_supported(param, cmode))
  4825. return -EOPNOTSUPP;
  4826. if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
  4827. if (param->type == DEVLINK_PARAM_TYPE_STRING)
  4828. strcpy(param_item->driverinit_value.vstr, value.vstr);
  4829. else
  4830. param_item->driverinit_value = value;
  4831. param_item->driverinit_value_valid = true;
  4832. } else {
  4833. if (!param->set)
  4834. return -EOPNOTSUPP;
  4835. ctx.val = value;
  4836. ctx.cmode = cmode;
  4837. err = devlink_param_set(devlink, param, &ctx);
  4838. if (err)
  4839. return err;
  4840. }
  4841. devlink_param_notify(devlink, port_index, param_item, cmd);
  4842. return 0;
  4843. }
  4844. static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
  4845. struct genl_info *info)
  4846. {
  4847. struct devlink *devlink = info->user_ptr[0];
  4848. return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->param_list,
  4849. info, DEVLINK_CMD_PARAM_NEW);
  4850. }
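/* Illustrative sketch (not part of this file): how a driver-side parameter
 * plugs into the set path above.  __devlink_nl_cmd_param_set_doit() checks the
 * attribute type against param->type, calls param->validate() if provided,
 * verifies the requested cmode against supported_cmodes and then either latches
 * a driverinit value or calls param->set().  All hypothetical_foo_* names and
 * values below are made up for the example.
 *
 *	enum {
 *		HYPOTHETICAL_FOO_PARAM_ID_QUEUE_COUNT = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
 *	};
 *
 *	static int
 *	hypothetical_foo_queue_count_validate(struct devlink *devlink, u32 id,
 *					      union devlink_param_value val,
 *					      struct netlink_ext_ack *extack)
 *	{
 *		if (val.vu32 == 0 || val.vu32 > 128) {
 *			NL_SET_ERR_MSG_MOD(extack, "queue count must be in [1, 128]");
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 *
 *	static const struct devlink_param hypothetical_foo_params[] = {
 *		DEVLINK_PARAM_DRIVER(HYPOTHETICAL_FOO_PARAM_ID_QUEUE_COUNT,
 *				     "queue_count", DEVLINK_PARAM_TYPE_U32,
 *				     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
 *				     NULL, NULL,
 *				     hypothetical_foo_queue_count_validate),
 *	};
 *
 * Registered with devlink_params_register(devlink, hypothetical_foo_params,
 * ARRAY_SIZE(hypothetical_foo_params)).  A driverinit-only parameter such as
 * this never reaches param->set(); the value is stored until the next reload.
 */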
  4851. static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
  4852. struct netlink_callback *cb)
  4853. {
  4854. NL_SET_ERR_MSG_MOD(cb->extack, "Port params are not supported");
  4855. return msg->len;
  4856. }
  4857. static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
  4858. struct genl_info *info)
  4859. {
  4860. NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
  4861. return -EINVAL;
  4862. }
  4863. static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
  4864. struct genl_info *info)
  4865. {
  4866. NL_SET_ERR_MSG_MOD(info->extack, "Port params are not supported");
  4867. return -EINVAL;
  4868. }
  4869. static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
  4870. struct devlink *devlink,
  4871. struct devlink_snapshot *snapshot)
  4872. {
  4873. struct nlattr *snap_attr;
  4874. int err;
  4875. snap_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
  4876. if (!snap_attr)
  4877. return -EINVAL;
  4878. err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
  4879. if (err)
  4880. goto nla_put_failure;
  4881. nla_nest_end(msg, snap_attr);
  4882. return 0;
  4883. nla_put_failure:
  4884. nla_nest_cancel(msg, snap_attr);
  4885. return err;
  4886. }
  4887. static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
  4888. struct devlink *devlink,
  4889. struct devlink_region *region)
  4890. {
  4891. struct devlink_snapshot *snapshot;
  4892. struct nlattr *snapshots_attr;
  4893. int err;
  4894. snapshots_attr = nla_nest_start_noflag(msg,
  4895. DEVLINK_ATTR_REGION_SNAPSHOTS);
  4896. if (!snapshots_attr)
  4897. return -EINVAL;
  4898. list_for_each_entry(snapshot, &region->snapshot_list, list) {
  4899. err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
  4900. if (err)
  4901. goto nla_put_failure;
  4902. }
  4903. nla_nest_end(msg, snapshots_attr);
  4904. return 0;
  4905. nla_put_failure:
  4906. nla_nest_cancel(msg, snapshots_attr);
  4907. return err;
  4908. }
  4909. static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
  4910. enum devlink_command cmd, u32 portid,
  4911. u32 seq, int flags,
  4912. struct devlink_region *region)
  4913. {
  4914. void *hdr;
  4915. int err;
  4916. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  4917. if (!hdr)
  4918. return -EMSGSIZE;
  4919. err = devlink_nl_put_handle(msg, devlink);
  4920. if (err)
  4921. goto nla_put_failure;
  4922. if (region->port) {
  4923. err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
  4924. region->port->index);
  4925. if (err)
  4926. goto nla_put_failure;
  4927. }
  4928. err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
  4929. if (err)
  4930. goto nla_put_failure;
  4931. err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
  4932. region->size,
  4933. DEVLINK_ATTR_PAD);
  4934. if (err)
  4935. goto nla_put_failure;
  4936. err = nla_put_u32(msg, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS,
  4937. region->max_snapshots);
  4938. if (err)
  4939. goto nla_put_failure;
  4940. err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
  4941. if (err)
  4942. goto nla_put_failure;
  4943. genlmsg_end(msg, hdr);
  4944. return 0;
  4945. nla_put_failure:
  4946. genlmsg_cancel(msg, hdr);
  4947. return err;
  4948. }
  4949. static struct sk_buff *
  4950. devlink_nl_region_notify_build(struct devlink_region *region,
  4951. struct devlink_snapshot *snapshot,
  4952. enum devlink_command cmd, u32 portid, u32 seq)
  4953. {
  4954. struct devlink *devlink = region->devlink;
  4955. struct sk_buff *msg;
  4956. void *hdr;
  4957. int err;
  4958. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  4959. if (!msg)
  4960. return ERR_PTR(-ENOMEM);
  4961. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, 0, cmd);
  4962. if (!hdr) {
  4963. err = -EMSGSIZE;
  4964. goto out_free_msg;
  4965. }
  4966. err = devlink_nl_put_handle(msg, devlink);
  4967. if (err)
  4968. goto out_cancel_msg;
  4969. if (region->port) {
  4970. err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
  4971. region->port->index);
  4972. if (err)
  4973. goto out_cancel_msg;
  4974. }
  4975. err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
  4976. region->ops->name);
  4977. if (err)
  4978. goto out_cancel_msg;
  4979. if (snapshot) {
  4980. err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID,
  4981. snapshot->id);
  4982. if (err)
  4983. goto out_cancel_msg;
  4984. } else {
  4985. err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
  4986. region->size, DEVLINK_ATTR_PAD);
  4987. if (err)
  4988. goto out_cancel_msg;
  4989. }
  4990. genlmsg_end(msg, hdr);
  4991. return msg;
  4992. out_cancel_msg:
  4993. genlmsg_cancel(msg, hdr);
  4994. out_free_msg:
  4995. nlmsg_free(msg);
  4996. return ERR_PTR(err);
  4997. }
  4998. static void devlink_nl_region_notify(struct devlink_region *region,
  4999. struct devlink_snapshot *snapshot,
  5000. enum devlink_command cmd)
  5001. {
  5002. struct devlink *devlink = region->devlink;
  5003. struct sk_buff *msg;
  5004. WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
  5005. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  5006. return;
  5007. msg = devlink_nl_region_notify_build(region, snapshot, cmd, 0, 0);
  5008. if (IS_ERR(msg))
  5009. return;
  5010. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
  5011. 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  5012. }
  5013. /**
  5014. * __devlink_snapshot_id_increment - Increment number of snapshots using an id
  5015. * @devlink: devlink instance
  5016. * @id: the snapshot id
  5017. *
  5018. * Track when a new snapshot begins using an id. Load the count for the
  5019. * given id from the snapshot xarray, increment it, and store it back.
  5020. *
  5021. * Called when a new snapshot is created with the given id.
  5022. *
  5023. * The id *must* have been previously allocated by
  5024. * devlink_region_snapshot_id_get().
  5025. *
  5026. * Returns 0 on success, or an error on failure.
  5027. */
  5028. static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
  5029. {
  5030. unsigned long count;
  5031. void *p;
  5032. int err;
  5033. xa_lock(&devlink->snapshot_ids);
  5034. p = xa_load(&devlink->snapshot_ids, id);
  5035. if (WARN_ON(!p)) {
  5036. err = -EINVAL;
  5037. goto unlock;
  5038. }
  5039. if (WARN_ON(!xa_is_value(p))) {
  5040. err = -EINVAL;
  5041. goto unlock;
  5042. }
  5043. count = xa_to_value(p);
  5044. count++;
  5045. err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
  5046. GFP_ATOMIC));
  5047. unlock:
  5048. xa_unlock(&devlink->snapshot_ids);
  5049. return err;
  5050. }
  5051. /**
  5052. * __devlink_snapshot_id_decrement - Decrease number of snapshots using an id
  5053. * @devlink: devlink instance
  5054. * @id: the snapshot id
  5055. *
  5056. * Track when a snapshot is deleted and stops using an id. Load the count
  5057. * for the given id from the snapshot xarray, decrement it, and store it
  5058. * back.
  5059. *
  5060. * If the count reaches zero, erase this id from the xarray, freeing it
  5061. * up for future re-use by devlink_region_snapshot_id_get().
  5062. *
  5063. * Called when a snapshot using the given id is deleted, and when the
  5064. * initial allocator of the id is finished using it.
  5065. */
  5066. static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
  5067. {
  5068. unsigned long count;
  5069. void *p;
  5070. xa_lock(&devlink->snapshot_ids);
  5071. p = xa_load(&devlink->snapshot_ids, id);
  5072. if (WARN_ON(!p))
  5073. goto unlock;
  5074. if (WARN_ON(!xa_is_value(p)))
  5075. goto unlock;
  5076. count = xa_to_value(p);
  5077. if (count > 1) {
  5078. count--;
  5079. __xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
  5080. GFP_ATOMIC);
  5081. } else {
  5082. /* If this was the last user, we can erase this id */
  5083. __xa_erase(&devlink->snapshot_ids, id);
  5084. }
  5085. unlock:
  5086. xa_unlock(&devlink->snapshot_ids);
  5087. }
  5088. /**
  5089. * __devlink_snapshot_id_insert - Insert a specific snapshot ID
  5090. * @devlink: devlink instance
  5091. * @id: the snapshot id
  5092. *
  5093. * Mark the given snapshot id as used by inserting a zero value into the
  5094. * snapshot xarray.
  5095. *
  5096. * This must be called while holding the devlink instance lock. Unlike
5097. * devlink_region_snapshot_id_get(), the initial reference count is zero, not one.
  5098. * It is expected that the id will immediately be used before
  5099. * releasing the devlink instance lock.
  5100. *
  5101. * Returns zero on success, or an error code if the snapshot id could not
  5102. * be inserted.
  5103. */
  5104. static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
  5105. {
  5106. int err;
  5107. xa_lock(&devlink->snapshot_ids);
  5108. if (xa_load(&devlink->snapshot_ids, id)) {
  5109. xa_unlock(&devlink->snapshot_ids);
  5110. return -EEXIST;
  5111. }
  5112. err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
  5113. GFP_ATOMIC));
  5114. xa_unlock(&devlink->snapshot_ids);
  5115. return err;
  5116. }
  5117. /**
  5118. * __devlink_region_snapshot_id_get - get snapshot ID
  5119. * @devlink: devlink instance
  5120. * @id: storage to return snapshot id
  5121. *
  5122. * Allocates a new snapshot id. Returns zero on success, or a negative
  5123. * error on failure. Must be called while holding the devlink instance
  5124. * lock.
  5125. *
  5126. * Snapshot IDs are tracked using an xarray which stores the number of
  5127. * users of the snapshot id.
  5128. *
  5129. * Note that the caller of this function counts as a 'user', in order to
  5130. * avoid race conditions. The caller must release its hold on the
  5131. * snapshot by using devlink_region_snapshot_id_put.
  5132. */
  5133. static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
  5134. {
  5135. return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
  5136. xa_limit_32b, GFP_KERNEL);
  5137. }
  5138. /**
  5139. * __devlink_region_snapshot_create - create a new snapshot
  5140. * This will add a new snapshot of a region. The snapshot
  5141. * will be stored on the region struct and can be accessed
  5142. * from devlink. This is useful for future analyses of snapshots.
  5143. * Multiple snapshots can be created on a region.
  5144. * The @snapshot_id should be obtained using the getter function.
  5145. *
  5146. * Must be called only while holding the region snapshot lock.
  5147. *
  5148. * @region: devlink region of the snapshot
  5149. * @data: snapshot data
  5150. * @snapshot_id: snapshot id to be created
  5151. */
  5152. static int
  5153. __devlink_region_snapshot_create(struct devlink_region *region,
  5154. u8 *data, u32 snapshot_id)
  5155. {
  5156. struct devlink *devlink = region->devlink;
  5157. struct devlink_snapshot *snapshot;
  5158. int err;
  5159. lockdep_assert_held(&region->snapshot_lock);
  5160. /* check if region can hold one more snapshot */
  5161. if (region->cur_snapshots == region->max_snapshots)
  5162. return -ENOSPC;
  5163. if (devlink_region_snapshot_get_by_id(region, snapshot_id))
  5164. return -EEXIST;
  5165. snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
  5166. if (!snapshot)
  5167. return -ENOMEM;
  5168. err = __devlink_snapshot_id_increment(devlink, snapshot_id);
  5169. if (err)
  5170. goto err_snapshot_id_increment;
  5171. snapshot->id = snapshot_id;
  5172. snapshot->region = region;
  5173. snapshot->data = data;
  5174. list_add_tail(&snapshot->list, &region->snapshot_list);
  5175. region->cur_snapshots++;
  5176. devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
  5177. return 0;
  5178. err_snapshot_id_increment:
  5179. kfree(snapshot);
  5180. return err;
  5181. }
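/* Illustrative sketch (not part of this file): the snapshot-id refcounting
 * above as seen from a driver that captures a snapshot on its own, for example
 * from an error interrupt.  The hypothetical_foo_* names are made up;
 * devlink_region_snapshot_id_get()/..._id_put() and
 * devlink_region_snapshot_create() are the public wrappers around the helpers
 * above.
 *
 *	static void hypothetical_foo_capture(struct devlink *devlink,
 *					     struct devlink_region *region,
 *					     const void *buf, size_t len)
 *	{
 *		u8 *data;
 *		u32 id;
 *
 *		if (devlink_region_snapshot_id_get(devlink, &id))
 *			return;
 *		data = kmemdup(buf, len, GFP_KERNEL);
 *		if (data && devlink_region_snapshot_create(region, data, id))
 *			kfree(data);
 *		devlink_region_snapshot_id_put(devlink, id);
 *	}
 *
 * id_get() stores the id with a count of one (the caller), snapshot creation
 * bumps it to two, and id_put() drops the caller's reference, so the id is
 * finally erased once the snapshot itself is deleted.
 */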
  5182. static void devlink_region_snapshot_del(struct devlink_region *region,
  5183. struct devlink_snapshot *snapshot)
  5184. {
  5185. struct devlink *devlink = region->devlink;
  5186. lockdep_assert_held(&region->snapshot_lock);
  5187. devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
  5188. region->cur_snapshots--;
  5189. list_del(&snapshot->list);
  5190. region->ops->destructor(snapshot->data);
  5191. __devlink_snapshot_id_decrement(devlink, snapshot->id);
  5192. kfree(snapshot);
  5193. }
  5194. static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
  5195. struct genl_info *info)
  5196. {
  5197. struct devlink *devlink = info->user_ptr[0];
  5198. struct devlink_port *port = NULL;
  5199. struct devlink_region *region;
  5200. const char *region_name;
  5201. struct sk_buff *msg;
  5202. unsigned int index;
  5203. int err;
  5204. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME))
  5205. return -EINVAL;
  5206. if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
  5207. index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
  5208. port = devlink_port_get_by_index(devlink, index);
  5209. if (!port)
  5210. return -ENODEV;
  5211. }
  5212. region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
  5213. if (port)
  5214. region = devlink_port_region_get_by_name(port, region_name);
  5215. else
  5216. region = devlink_region_get_by_name(devlink, region_name);
  5217. if (!region)
  5218. return -EINVAL;
  5219. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  5220. if (!msg)
  5221. return -ENOMEM;
  5222. err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET,
  5223. info->snd_portid, info->snd_seq, 0,
  5224. region);
  5225. if (err) {
  5226. nlmsg_free(msg);
  5227. return err;
  5228. }
  5229. return genlmsg_reply(msg, info);
  5230. }
  5231. static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
  5232. struct netlink_callback *cb,
  5233. struct devlink_port *port,
  5234. int *idx,
  5235. int start)
  5236. {
  5237. struct devlink_region *region;
  5238. int err = 0;
  5239. list_for_each_entry(region, &port->region_list, list) {
  5240. if (*idx < start) {
  5241. (*idx)++;
  5242. continue;
  5243. }
  5244. err = devlink_nl_region_fill(msg, port->devlink,
  5245. DEVLINK_CMD_REGION_GET,
  5246. NETLINK_CB(cb->skb).portid,
  5247. cb->nlh->nlmsg_seq,
  5248. NLM_F_MULTI, region);
  5249. if (err)
  5250. goto out;
  5251. (*idx)++;
  5252. }
  5253. out:
  5254. return err;
  5255. }
  5256. static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
  5257. struct netlink_callback *cb,
  5258. struct devlink *devlink,
  5259. int *idx,
  5260. int start)
  5261. {
  5262. struct devlink_region *region;
  5263. struct devlink_port *port;
  5264. int err = 0;
  5265. devl_lock(devlink);
  5266. list_for_each_entry(region, &devlink->region_list, list) {
  5267. if (*idx < start) {
  5268. (*idx)++;
  5269. continue;
  5270. }
  5271. err = devlink_nl_region_fill(msg, devlink,
  5272. DEVLINK_CMD_REGION_GET,
  5273. NETLINK_CB(cb->skb).portid,
  5274. cb->nlh->nlmsg_seq,
  5275. NLM_F_MULTI, region);
  5276. if (err)
  5277. goto out;
  5278. (*idx)++;
  5279. }
  5280. list_for_each_entry(port, &devlink->port_list, list) {
  5281. err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, idx,
  5282. start);
  5283. if (err)
  5284. goto out;
  5285. }
  5286. out:
  5287. devl_unlock(devlink);
  5288. return err;
  5289. }
  5290. static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
  5291. struct netlink_callback *cb)
  5292. {
  5293. struct devlink *devlink;
  5294. int start = cb->args[0];
  5295. unsigned long index;
  5296. int idx = 0;
  5297. int err = 0;
  5298. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  5299. err = devlink_nl_cmd_region_get_devlink_dumpit(msg, cb, devlink,
  5300. &idx, start);
  5301. devlink_put(devlink);
  5302. if (err)
  5303. goto out;
  5304. }
  5305. out:
  5306. cb->args[0] = idx;
  5307. return msg->len;
  5308. }
  5309. static int devlink_nl_cmd_region_del(struct sk_buff *skb,
  5310. struct genl_info *info)
  5311. {
  5312. struct devlink *devlink = info->user_ptr[0];
  5313. struct devlink_snapshot *snapshot;
  5314. struct devlink_port *port = NULL;
  5315. struct devlink_region *region;
  5316. const char *region_name;
  5317. unsigned int index;
  5318. u32 snapshot_id;
  5319. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME) ||
  5320. GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_SNAPSHOT_ID))
  5321. return -EINVAL;
  5322. region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
  5323. snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
  5324. if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
  5325. index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
  5326. port = devlink_port_get_by_index(devlink, index);
  5327. if (!port)
  5328. return -ENODEV;
  5329. }
  5330. if (port)
  5331. region = devlink_port_region_get_by_name(port, region_name);
  5332. else
  5333. region = devlink_region_get_by_name(devlink, region_name);
  5334. if (!region)
  5335. return -EINVAL;
  5336. mutex_lock(&region->snapshot_lock);
  5337. snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
  5338. if (!snapshot) {
  5339. mutex_unlock(&region->snapshot_lock);
  5340. return -EINVAL;
  5341. }
  5342. devlink_region_snapshot_del(region, snapshot);
  5343. mutex_unlock(&region->snapshot_lock);
  5344. return 0;
  5345. }
  5346. static int
  5347. devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
  5348. {
  5349. struct devlink *devlink = info->user_ptr[0];
  5350. struct devlink_snapshot *snapshot;
  5351. struct devlink_port *port = NULL;
  5352. struct nlattr *snapshot_id_attr;
  5353. struct devlink_region *region;
  5354. const char *region_name;
  5355. unsigned int index;
  5356. u32 snapshot_id;
  5357. u8 *data;
  5358. int err;
  5359. if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) {
  5360. NL_SET_ERR_MSG_MOD(info->extack, "No region name provided");
  5361. return -EINVAL;
  5362. }
  5363. region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
  5364. if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
  5365. index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
  5366. port = devlink_port_get_by_index(devlink, index);
  5367. if (!port)
  5368. return -ENODEV;
  5369. }
  5370. if (port)
  5371. region = devlink_port_region_get_by_name(port, region_name);
  5372. else
  5373. region = devlink_region_get_by_name(devlink, region_name);
  5374. if (!region) {
  5375. NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not exist");
  5376. return -EINVAL;
  5377. }
  5378. if (!region->ops->snapshot) {
  5379. NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not support taking an immediate snapshot");
  5380. return -EOPNOTSUPP;
  5381. }
  5382. mutex_lock(&region->snapshot_lock);
  5383. if (region->cur_snapshots == region->max_snapshots) {
  5384. NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
  5385. err = -ENOSPC;
  5386. goto unlock;
  5387. }
  5388. snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
  5389. if (snapshot_id_attr) {
  5390. snapshot_id = nla_get_u32(snapshot_id_attr);
  5391. if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
  5392. NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
  5393. err = -EEXIST;
  5394. goto unlock;
  5395. }
  5396. err = __devlink_snapshot_id_insert(devlink, snapshot_id);
  5397. if (err)
  5398. goto unlock;
  5399. } else {
  5400. err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
  5401. if (err) {
  5402. NL_SET_ERR_MSG_MOD(info->extack, "Failed to allocate a new snapshot id");
  5403. goto unlock;
  5404. }
  5405. }
  5406. if (port)
  5407. err = region->port_ops->snapshot(port, region->port_ops,
  5408. info->extack, &data);
  5409. else
  5410. err = region->ops->snapshot(devlink, region->ops,
  5411. info->extack, &data);
  5412. if (err)
  5413. goto err_snapshot_capture;
  5414. err = __devlink_region_snapshot_create(region, data, snapshot_id);
  5415. if (err)
  5416. goto err_snapshot_create;
  5417. if (!snapshot_id_attr) {
  5418. struct sk_buff *msg;
  5419. snapshot = devlink_region_snapshot_get_by_id(region,
  5420. snapshot_id);
  5421. if (WARN_ON(!snapshot)) {
  5422. err = -EINVAL;
  5423. goto unlock;
  5424. }
  5425. msg = devlink_nl_region_notify_build(region, snapshot,
  5426. DEVLINK_CMD_REGION_NEW,
  5427. info->snd_portid,
  5428. info->snd_seq);
  5429. err = PTR_ERR_OR_ZERO(msg);
  5430. if (err)
  5431. goto err_notify;
  5432. err = genlmsg_reply(msg, info);
  5433. if (err)
  5434. goto err_notify;
  5435. }
  5436. mutex_unlock(&region->snapshot_lock);
  5437. return 0;
  5438. err_snapshot_create:
  5439. region->ops->destructor(data);
  5440. err_snapshot_capture:
  5441. __devlink_snapshot_id_decrement(devlink, snapshot_id);
  5442. mutex_unlock(&region->snapshot_lock);
  5443. return err;
  5444. err_notify:
  5445. devlink_region_snapshot_del(region, snapshot);
  5446. unlock:
  5447. mutex_unlock(&region->snapshot_lock);
  5448. return err;
  5449. }
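/* Illustrative sketch (not part of this file): the region ops consumed by
 * devlink_nl_cmd_region_new() above.  When DEVLINK_CMD_REGION_NEW arrives
 * without a snapshot id, one is allocated, ops->snapshot() is asked to produce
 * the data buffer, and ops->destructor() frees it when the snapshot is
 * deleted.  The hypothetical_foo_* names and the size macro are made up.
 *
 *	static int
 *	hypothetical_foo_region_snapshot(struct devlink *devlink,
 *					 const struct devlink_region_ops *ops,
 *					 struct netlink_ext_ack *extack,
 *					 u8 **data)
 *	{
 *		u8 *buf;
 *
 *		buf = kzalloc(HYPOTHETICAL_FOO_REGION_SIZE, GFP_KERNEL);
 *		if (!buf)
 *			return -ENOMEM;
 *		hypothetical_foo_hw_read(buf, HYPOTHETICAL_FOO_REGION_SIZE);
 *		*data = buf;
 *		return 0;
 *	}
 *
 *	static const struct devlink_region_ops hypothetical_foo_region_ops = {
 *		.name = "foo-registers",
 *		.snapshot = hypothetical_foo_region_snapshot,
 *		.destructor = kfree,
 *	};
 *
 * The region itself would be created with devlink_region_create(devlink,
 * &hypothetical_foo_region_ops, max_snapshots, HYPOTHETICAL_FOO_REGION_SIZE).
 */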
  5450. static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
  5451. struct devlink *devlink,
  5452. u8 *chunk, u32 chunk_size,
  5453. u64 addr)
  5454. {
  5455. struct nlattr *chunk_attr;
  5456. int err;
  5457. chunk_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_CHUNK);
  5458. if (!chunk_attr)
  5459. return -EINVAL;
  5460. err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk);
  5461. if (err)
  5462. goto nla_put_failure;
  5463. err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
  5464. DEVLINK_ATTR_PAD);
  5465. if (err)
  5466. goto nla_put_failure;
  5467. nla_nest_end(msg, chunk_attr);
  5468. return 0;
  5469. nla_put_failure:
  5470. nla_nest_cancel(msg, chunk_attr);
  5471. return err;
  5472. }
  5473. #define DEVLINK_REGION_READ_CHUNK_SIZE 256
  5474. static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb,
  5475. struct devlink *devlink,
  5476. struct devlink_region *region,
  5477. struct nlattr **attrs,
  5478. u64 start_offset,
  5479. u64 end_offset,
  5480. u64 *new_offset)
  5481. {
  5482. struct devlink_snapshot *snapshot;
  5483. u64 curr_offset = start_offset;
  5484. u32 snapshot_id;
  5485. int err = 0;
  5486. *new_offset = start_offset;
  5487. snapshot_id = nla_get_u32(attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
  5488. snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
  5489. if (!snapshot)
  5490. return -EINVAL;
  5491. while (curr_offset < end_offset) {
  5492. u32 data_size;
  5493. u8 *data;
  5494. if (end_offset - curr_offset < DEVLINK_REGION_READ_CHUNK_SIZE)
  5495. data_size = end_offset - curr_offset;
  5496. else
  5497. data_size = DEVLINK_REGION_READ_CHUNK_SIZE;
  5498. data = &snapshot->data[curr_offset];
  5499. err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink,
  5500. data, data_size,
  5501. curr_offset);
  5502. if (err)
  5503. break;
  5504. curr_offset += data_size;
  5505. }
  5506. *new_offset = curr_offset;
  5507. return err;
  5508. }
  5509. static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
  5510. struct netlink_callback *cb)
  5511. {
  5512. const struct genl_dumpit_info *info = genl_dumpit_info(cb);
  5513. u64 ret_offset, start_offset, end_offset = U64_MAX;
  5514. struct nlattr **attrs = info->attrs;
  5515. struct devlink_port *port = NULL;
  5516. struct devlink_region *region;
  5517. struct nlattr *chunks_attr;
  5518. const char *region_name;
  5519. struct devlink *devlink;
  5520. unsigned int index;
  5521. void *hdr;
  5522. int err;
  5523. start_offset = *((u64 *)&cb->args[0]);
  5524. devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
  5525. if (IS_ERR(devlink))
  5526. return PTR_ERR(devlink);
  5527. devl_lock(devlink);
  5528. if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
  5529. !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) {
  5530. err = -EINVAL;
  5531. goto out_unlock;
  5532. }
  5533. if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
  5534. index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
  5535. port = devlink_port_get_by_index(devlink, index);
  5536. if (!port) {
  5537. err = -ENODEV;
  5538. goto out_unlock;
  5539. }
  5540. }
  5541. region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
  5542. if (port)
  5543. region = devlink_port_region_get_by_name(port, region_name);
  5544. else
  5545. region = devlink_region_get_by_name(devlink, region_name);
  5546. if (!region) {
  5547. err = -EINVAL;
  5548. goto out_unlock;
  5549. }
  5550. if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
  5551. attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
  5552. if (!start_offset)
  5553. start_offset =
  5554. nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
  5555. end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
  5556. end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
  5557. }
  5558. if (end_offset > region->size)
  5559. end_offset = region->size;
  5560. /* return 0 if there is no further data to read */
  5561. if (start_offset == end_offset) {
  5562. err = 0;
  5563. goto out_unlock;
  5564. }
  5565. hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  5566. &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
  5567. DEVLINK_CMD_REGION_READ);
  5568. if (!hdr) {
  5569. err = -EMSGSIZE;
  5570. goto out_unlock;
  5571. }
  5572. err = devlink_nl_put_handle(skb, devlink);
  5573. if (err)
  5574. goto nla_put_failure;
  5575. if (region->port) {
  5576. err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
  5577. region->port->index);
  5578. if (err)
  5579. goto nla_put_failure;
  5580. }
  5581. err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
  5582. if (err)
  5583. goto nla_put_failure;
  5584. chunks_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_REGION_CHUNKS);
  5585. if (!chunks_attr) {
  5586. err = -EMSGSIZE;
  5587. goto nla_put_failure;
  5588. }
  5589. err = devlink_nl_region_read_snapshot_fill(skb, devlink,
  5590. region, attrs,
  5591. start_offset,
  5592. end_offset, &ret_offset);
  5593. if (err && err != -EMSGSIZE)
  5594. goto nla_put_failure;
5595. /* Check whether any progress was made, to prevent an infinite loop */
  5596. if (ret_offset == start_offset) {
  5597. err = -EINVAL;
  5598. goto nla_put_failure;
  5599. }
  5600. *((u64 *)&cb->args[0]) = ret_offset;
  5601. nla_nest_end(skb, chunks_attr);
  5602. genlmsg_end(skb, hdr);
  5603. devl_unlock(devlink);
  5604. devlink_put(devlink);
  5605. return skb->len;
  5606. nla_put_failure:
  5607. genlmsg_cancel(skb, hdr);
  5608. out_unlock:
  5609. devl_unlock(devlink);
  5610. devlink_put(devlink);
  5611. return err;
  5612. }
  5613. int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
  5614. {
  5615. if (!req->msg)
  5616. return 0;
  5617. return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name);
  5618. }
  5619. EXPORT_SYMBOL_GPL(devlink_info_driver_name_put);
  5620. int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
  5621. {
  5622. if (!req->msg)
  5623. return 0;
  5624. return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn);
  5625. }
  5626. EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
  5627. int devlink_info_board_serial_number_put(struct devlink_info_req *req,
  5628. const char *bsn)
  5629. {
  5630. if (!req->msg)
  5631. return 0;
  5632. return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,
  5633. bsn);
  5634. }
  5635. EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put);
  5636. static int devlink_info_version_put(struct devlink_info_req *req, int attr,
  5637. const char *version_name,
  5638. const char *version_value,
  5639. enum devlink_info_version_type version_type)
  5640. {
  5641. struct nlattr *nest;
  5642. int err;
  5643. if (req->version_cb)
  5644. req->version_cb(version_name, version_type,
  5645. req->version_cb_priv);
  5646. if (!req->msg)
  5647. return 0;
  5648. nest = nla_nest_start_noflag(req->msg, attr);
  5649. if (!nest)
  5650. return -EMSGSIZE;
  5651. err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME,
  5652. version_name);
  5653. if (err)
  5654. goto nla_put_failure;
  5655. err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE,
  5656. version_value);
  5657. if (err)
  5658. goto nla_put_failure;
  5659. nla_nest_end(req->msg, nest);
  5660. return 0;
  5661. nla_put_failure:
  5662. nla_nest_cancel(req->msg, nest);
  5663. return err;
  5664. }
  5665. int devlink_info_version_fixed_put(struct devlink_info_req *req,
  5666. const char *version_name,
  5667. const char *version_value)
  5668. {
  5669. return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED,
  5670. version_name, version_value,
  5671. DEVLINK_INFO_VERSION_TYPE_NONE);
  5672. }
  5673. EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put);
  5674. int devlink_info_version_stored_put(struct devlink_info_req *req,
  5675. const char *version_name,
  5676. const char *version_value)
  5677. {
  5678. return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
  5679. version_name, version_value,
  5680. DEVLINK_INFO_VERSION_TYPE_NONE);
  5681. }
  5682. EXPORT_SYMBOL_GPL(devlink_info_version_stored_put);
  5683. int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
  5684. const char *version_name,
  5685. const char *version_value,
  5686. enum devlink_info_version_type version_type)
  5687. {
  5688. return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED,
  5689. version_name, version_value,
  5690. version_type);
  5691. }
  5692. EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext);
  5693. int devlink_info_version_running_put(struct devlink_info_req *req,
  5694. const char *version_name,
  5695. const char *version_value)
  5696. {
  5697. return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
  5698. version_name, version_value,
  5699. DEVLINK_INFO_VERSION_TYPE_NONE);
  5700. }
  5701. EXPORT_SYMBOL_GPL(devlink_info_version_running_put);
  5702. int devlink_info_version_running_put_ext(struct devlink_info_req *req,
  5703. const char *version_name,
  5704. const char *version_value,
  5705. enum devlink_info_version_type version_type)
  5706. {
  5707. return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING,
  5708. version_name, version_value,
  5709. version_type);
  5710. }
  5711. EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext);
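/* Illustrative sketch (not part of this file): a driver's ->info_get()
 * callback feeding the helpers above.  devlink_nl_info_fill() hands the
 * request to the driver, which reports whatever subset it knows; every helper
 * returns 0 when req->msg is NULL (version_cb-only requests).  The
 * hypothetical_foo name and the version strings are made up.
 *
 *	static int hypothetical_foo_info_get(struct devlink *devlink,
 *					     struct devlink_info_req *req,
 *					     struct netlink_ext_ack *extack)
 *	{
 *		int err;
 *
 *		err = devlink_info_driver_name_put(req, "hypothetical_foo");
 *		if (err)
 *			return err;
 *		err = devlink_info_serial_number_put(req, "SN0123456789");
 *		if (err)
 *			return err;
 *		err = devlink_info_version_fixed_put(req, "board.id", "FOO-R1");
 *		if (err)
 *			return err;
 *		return devlink_info_version_running_put(req, "fw", "1.2.3");
 *	}
 */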
  5712. static int
  5713. devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink,
  5714. enum devlink_command cmd, u32 portid,
  5715. u32 seq, int flags, struct netlink_ext_ack *extack)
  5716. {
  5717. struct devlink_info_req req = {};
  5718. void *hdr;
  5719. int err;
  5720. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  5721. if (!hdr)
  5722. return -EMSGSIZE;
  5723. err = -EMSGSIZE;
  5724. if (devlink_nl_put_handle(msg, devlink))
  5725. goto err_cancel_msg;
  5726. req.msg = msg;
  5727. err = devlink->ops->info_get(devlink, &req, extack);
  5728. if (err)
  5729. goto err_cancel_msg;
  5730. genlmsg_end(msg, hdr);
  5731. return 0;
  5732. err_cancel_msg:
  5733. genlmsg_cancel(msg, hdr);
  5734. return err;
  5735. }
  5736. static int devlink_nl_cmd_info_get_doit(struct sk_buff *skb,
  5737. struct genl_info *info)
  5738. {
  5739. struct devlink *devlink = info->user_ptr[0];
  5740. struct sk_buff *msg;
  5741. int err;
  5742. if (!devlink->ops->info_get)
  5743. return -EOPNOTSUPP;
  5744. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  5745. if (!msg)
  5746. return -ENOMEM;
  5747. err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
  5748. info->snd_portid, info->snd_seq, 0,
  5749. info->extack);
  5750. if (err) {
  5751. nlmsg_free(msg);
  5752. return err;
  5753. }
  5754. return genlmsg_reply(msg, info);
  5755. }
  5756. static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
  5757. struct netlink_callback *cb)
  5758. {
  5759. struct devlink *devlink;
  5760. int start = cb->args[0];
  5761. unsigned long index;
  5762. int idx = 0;
  5763. int err = 0;
  5764. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  5765. if (idx < start || !devlink->ops->info_get)
  5766. goto inc;
  5767. devl_lock(devlink);
  5768. err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
  5769. NETLINK_CB(cb->skb).portid,
  5770. cb->nlh->nlmsg_seq, NLM_F_MULTI,
  5771. cb->extack);
  5772. devl_unlock(devlink);
  5773. if (err == -EOPNOTSUPP)
  5774. err = 0;
  5775. else if (err) {
  5776. devlink_put(devlink);
  5777. break;
  5778. }
  5779. inc:
  5780. idx++;
  5781. devlink_put(devlink);
  5782. }
  5783. if (err != -EMSGSIZE)
  5784. return err;
  5785. cb->args[0] = idx;
  5786. return msg->len;
  5787. }
  5788. struct devlink_fmsg_item {
  5789. struct list_head list;
  5790. int attrtype;
  5791. u8 nla_type;
  5792. u16 len;
  5793. int value[];
  5794. };
  5795. struct devlink_fmsg {
  5796. struct list_head item_list;
5797. bool putting_binary; /* This flag forces binary data to be enclosed
5798. * in array brackets. It requires the use of the
5799. * designated API:
5800. * devlink_fmsg_binary_pair_nest_start()
5801. * devlink_fmsg_binary_pair_nest_end()
5802. */
  5803. };
  5804. static struct devlink_fmsg *devlink_fmsg_alloc(void)
  5805. {
  5806. struct devlink_fmsg *fmsg;
  5807. fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL);
  5808. if (!fmsg)
  5809. return NULL;
  5810. INIT_LIST_HEAD(&fmsg->item_list);
  5811. return fmsg;
  5812. }
  5813. static void devlink_fmsg_free(struct devlink_fmsg *fmsg)
  5814. {
  5815. struct devlink_fmsg_item *item, *tmp;
  5816. list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) {
  5817. list_del(&item->list);
  5818. kfree(item);
  5819. }
  5820. kfree(fmsg);
  5821. }
  5822. static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
  5823. int attrtype)
  5824. {
  5825. struct devlink_fmsg_item *item;
  5826. item = kzalloc(sizeof(*item), GFP_KERNEL);
  5827. if (!item)
  5828. return -ENOMEM;
  5829. item->attrtype = attrtype;
  5830. list_add_tail(&item->list, &fmsg->item_list);
  5831. return 0;
  5832. }
  5833. int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
  5834. {
  5835. if (fmsg->putting_binary)
  5836. return -EINVAL;
  5837. return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
  5838. }
  5839. EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
  5840. static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
  5841. {
  5842. if (fmsg->putting_binary)
  5843. return -EINVAL;
  5844. return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
  5845. }
  5846. int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
  5847. {
  5848. if (fmsg->putting_binary)
  5849. return -EINVAL;
  5850. return devlink_fmsg_nest_end(fmsg);
  5851. }
  5852. EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
  5853. #define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN)
  5854. static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
  5855. {
  5856. struct devlink_fmsg_item *item;
  5857. if (fmsg->putting_binary)
  5858. return -EINVAL;
  5859. if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
  5860. return -EMSGSIZE;
  5861. item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL);
  5862. if (!item)
  5863. return -ENOMEM;
  5864. item->nla_type = NLA_NUL_STRING;
  5865. item->len = strlen(name) + 1;
  5866. item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME;
  5867. memcpy(&item->value, name, item->len);
  5868. list_add_tail(&item->list, &fmsg->item_list);
  5869. return 0;
  5870. }
  5871. int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
  5872. {
  5873. int err;
  5874. if (fmsg->putting_binary)
  5875. return -EINVAL;
  5876. err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
  5877. if (err)
  5878. return err;
  5879. err = devlink_fmsg_put_name(fmsg, name);
  5880. if (err)
  5881. return err;
  5882. return 0;
  5883. }
  5884. EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
  5885. int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
  5886. {
  5887. if (fmsg->putting_binary)
  5888. return -EINVAL;
  5889. return devlink_fmsg_nest_end(fmsg);
  5890. }
  5891. EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
  5892. int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
  5893. const char *name)
  5894. {
  5895. int err;
  5896. if (fmsg->putting_binary)
  5897. return -EINVAL;
  5898. err = devlink_fmsg_pair_nest_start(fmsg, name);
  5899. if (err)
  5900. return err;
  5901. err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START);
  5902. if (err)
  5903. return err;
  5904. return 0;
  5905. }
  5906. EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start);
  5907. int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
  5908. {
  5909. int err;
  5910. if (fmsg->putting_binary)
  5911. return -EINVAL;
  5912. err = devlink_fmsg_nest_end(fmsg);
  5913. if (err)
  5914. return err;
  5915. err = devlink_fmsg_nest_end(fmsg);
  5916. if (err)
  5917. return err;
  5918. return 0;
  5919. }
  5920. EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
  5921. int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
  5922. const char *name)
  5923. {
  5924. int err;
  5925. err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
  5926. if (err)
  5927. return err;
  5928. fmsg->putting_binary = true;
  5929. return err;
  5930. }
  5931. EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start);
  5932. int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg)
  5933. {
  5934. if (!fmsg->putting_binary)
  5935. return -EINVAL;
  5936. fmsg->putting_binary = false;
  5937. return devlink_fmsg_arr_pair_nest_end(fmsg);
  5938. }
  5939. EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end);
  5940. static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
  5941. const void *value, u16 value_len,
  5942. u8 value_nla_type)
  5943. {
  5944. struct devlink_fmsg_item *item;
  5945. if (value_len > DEVLINK_FMSG_MAX_SIZE)
  5946. return -EMSGSIZE;
  5947. item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL);
  5948. if (!item)
  5949. return -ENOMEM;
  5950. item->nla_type = value_nla_type;
  5951. item->len = value_len;
  5952. item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
  5953. memcpy(&item->value, value, item->len);
  5954. list_add_tail(&item->list, &fmsg->item_list);
  5955. return 0;
  5956. }
  5957. static int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
  5958. {
  5959. if (fmsg->putting_binary)
  5960. return -EINVAL;
  5961. return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
  5962. }
  5963. static int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
  5964. {
  5965. if (fmsg->putting_binary)
  5966. return -EINVAL;
  5967. return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
  5968. }
  5969. int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
  5970. {
  5971. if (fmsg->putting_binary)
  5972. return -EINVAL;
  5973. return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
  5974. }
  5975. EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
  5976. static int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
  5977. {
  5978. if (fmsg->putting_binary)
  5979. return -EINVAL;
  5980. return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
  5981. }
  5982. int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
  5983. {
  5984. if (fmsg->putting_binary)
  5985. return -EINVAL;
  5986. return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
  5987. NLA_NUL_STRING);
  5988. }
  5989. EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
  5990. int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
  5991. u16 value_len)
  5992. {
  5993. if (!fmsg->putting_binary)
  5994. return -EINVAL;
  5995. return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
  5996. }
  5997. EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
  5998. int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
  5999. bool value)
  6000. {
  6001. int err;
  6002. err = devlink_fmsg_pair_nest_start(fmsg, name);
  6003. if (err)
  6004. return err;
  6005. err = devlink_fmsg_bool_put(fmsg, value);
  6006. if (err)
  6007. return err;
  6008. err = devlink_fmsg_pair_nest_end(fmsg);
  6009. if (err)
  6010. return err;
  6011. return 0;
  6012. }
  6013. EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put);
  6014. int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
  6015. u8 value)
  6016. {
  6017. int err;
  6018. err = devlink_fmsg_pair_nest_start(fmsg, name);
  6019. if (err)
  6020. return err;
  6021. err = devlink_fmsg_u8_put(fmsg, value);
  6022. if (err)
  6023. return err;
  6024. err = devlink_fmsg_pair_nest_end(fmsg);
  6025. if (err)
  6026. return err;
  6027. return 0;
  6028. }
  6029. EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put);
  6030. int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
  6031. u32 value)
  6032. {
  6033. int err;
  6034. err = devlink_fmsg_pair_nest_start(fmsg, name);
  6035. if (err)
  6036. return err;
  6037. err = devlink_fmsg_u32_put(fmsg, value);
  6038. if (err)
  6039. return err;
  6040. err = devlink_fmsg_pair_nest_end(fmsg);
  6041. if (err)
  6042. return err;
  6043. return 0;
  6044. }
  6045. EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put);
  6046. int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
  6047. u64 value)
  6048. {
  6049. int err;
  6050. err = devlink_fmsg_pair_nest_start(fmsg, name);
  6051. if (err)
  6052. return err;
  6053. err = devlink_fmsg_u64_put(fmsg, value);
  6054. if (err)
  6055. return err;
  6056. err = devlink_fmsg_pair_nest_end(fmsg);
  6057. if (err)
  6058. return err;
  6059. return 0;
  6060. }
  6061. EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put);
  6062. int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
  6063. const char *value)
  6064. {
  6065. int err;
  6066. err = devlink_fmsg_pair_nest_start(fmsg, name);
  6067. if (err)
  6068. return err;
  6069. err = devlink_fmsg_string_put(fmsg, value);
  6070. if (err)
  6071. return err;
  6072. err = devlink_fmsg_pair_nest_end(fmsg);
  6073. if (err)
  6074. return err;
  6075. return 0;
  6076. }
  6077. EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put);
  6078. int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
  6079. const void *value, u32 value_len)
  6080. {
  6081. u32 data_size;
  6082. int end_err;
  6083. u32 offset;
  6084. int err;
  6085. err = devlink_fmsg_binary_pair_nest_start(fmsg, name);
  6086. if (err)
  6087. return err;
  6088. for (offset = 0; offset < value_len; offset += data_size) {
  6089. data_size = value_len - offset;
  6090. if (data_size > DEVLINK_FMSG_MAX_SIZE)
  6091. data_size = DEVLINK_FMSG_MAX_SIZE;
  6092. err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
  6093. if (err)
  6094. break;
  6095. /* Exit from loop with a break (instead of
  6096. * return) to make sure putting_binary is turned off in
  6097. * devlink_fmsg_binary_pair_nest_end
  6098. */
  6099. }
  6100. end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
  6101. if (end_err)
  6102. err = end_err;
  6103. return err;
  6104. }
  6105. EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
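/* Illustrative sketch (not part of this file): how a health reporter's
 * ->diagnose() callback would use the fmsg helpers above to build nested
 * output.  The pair helpers emit name/value pairs, the arr pair helpers open a
 * named array of plain values, and devlink_fmsg_binary_pair_put() splits large
 * blobs into DEVLINK_FMSG_MAX_SIZE chunks.  The hypothetical_foo_* names and
 * fields are made up.
 *
 *	static int
 *	hypothetical_foo_diagnose(struct devlink_health_reporter *reporter,
 *				  struct devlink_fmsg *fmsg,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct hypothetical_foo *foo = devlink_health_reporter_priv(reporter);
 *		int i, err;
 *
 *		err = devlink_fmsg_u32_pair_put(fmsg, "irq_errors", foo->irq_errors);
 *		if (err)
 *			return err;
 *		err = devlink_fmsg_arr_pair_nest_start(fmsg, "queue_depth");
 *		if (err)
 *			return err;
 *		for (i = 0; i < foo->num_queues; i++) {
 *			err = devlink_fmsg_u32_put(fmsg, foo->queue_depth[i]);
 *			if (err)
 *				return err;
 *		}
 *		return devlink_fmsg_arr_pair_nest_end(fmsg);
 *	}
 */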
  6106. static int
  6107. devlink_fmsg_item_fill_type(struct devlink_fmsg_item *msg, struct sk_buff *skb)
  6108. {
  6109. switch (msg->nla_type) {
  6110. case NLA_FLAG:
  6111. case NLA_U8:
  6112. case NLA_U32:
  6113. case NLA_U64:
  6114. case NLA_NUL_STRING:
  6115. case NLA_BINARY:
  6116. return nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE,
  6117. msg->nla_type);
  6118. default:
  6119. return -EINVAL;
  6120. }
  6121. }
  6122. static int
  6123. devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb)
  6124. {
  6125. int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA;
  6126. u8 tmp;
  6127. switch (msg->nla_type) {
  6128. case NLA_FLAG:
  6129. /* Always provide flag data, regardless of its value */
  6130. tmp = *(bool *) msg->value;
  6131. return nla_put_u8(skb, attrtype, tmp);
  6132. case NLA_U8:
  6133. return nla_put_u8(skb, attrtype, *(u8 *) msg->value);
  6134. case NLA_U32:
  6135. return nla_put_u32(skb, attrtype, *(u32 *) msg->value);
  6136. case NLA_U64:
  6137. return nla_put_u64_64bit(skb, attrtype, *(u64 *) msg->value,
  6138. DEVLINK_ATTR_PAD);
  6139. case NLA_NUL_STRING:
  6140. return nla_put_string(skb, attrtype, (char *) &msg->value);
  6141. case NLA_BINARY:
  6142. return nla_put(skb, attrtype, msg->len, (void *) &msg->value);
  6143. default:
  6144. return -EINVAL;
  6145. }
  6146. }
  6147. static int
  6148. devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb,
  6149. int *start)
  6150. {
  6151. struct devlink_fmsg_item *item;
  6152. struct nlattr *fmsg_nlattr;
  6153. int i = 0;
  6154. int err;
  6155. fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG);
  6156. if (!fmsg_nlattr)
  6157. return -EMSGSIZE;
  6158. list_for_each_entry(item, &fmsg->item_list, list) {
  6159. if (i < *start) {
  6160. i++;
  6161. continue;
  6162. }
  6163. switch (item->attrtype) {
  6164. case DEVLINK_ATTR_FMSG_OBJ_NEST_START:
  6165. case DEVLINK_ATTR_FMSG_PAIR_NEST_START:
  6166. case DEVLINK_ATTR_FMSG_ARR_NEST_START:
  6167. case DEVLINK_ATTR_FMSG_NEST_END:
  6168. err = nla_put_flag(skb, item->attrtype);
  6169. break;
  6170. case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA:
  6171. err = devlink_fmsg_item_fill_type(item, skb);
  6172. if (err)
  6173. break;
  6174. err = devlink_fmsg_item_fill_data(item, skb);
  6175. break;
  6176. case DEVLINK_ATTR_FMSG_OBJ_NAME:
  6177. err = nla_put_string(skb, item->attrtype,
  6178. (char *) &item->value);
  6179. break;
  6180. default:
  6181. err = -EINVAL;
  6182. break;
  6183. }
  6184. if (!err)
  6185. *start = ++i;
  6186. else
  6187. break;
  6188. }
  6189. nla_nest_end(skb, fmsg_nlattr);
  6190. return err;
  6191. }
  6192. static int devlink_fmsg_snd(struct devlink_fmsg *fmsg,
  6193. struct genl_info *info,
  6194. enum devlink_command cmd, int flags)
  6195. {
  6196. struct nlmsghdr *nlh;
  6197. struct sk_buff *skb;
  6198. bool last = false;
  6199. int index = 0;
  6200. void *hdr;
  6201. int err;
  6202. while (!last) {
  6203. int tmp_index = index;
  6204. skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
  6205. if (!skb)
  6206. return -ENOMEM;
  6207. hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
  6208. &devlink_nl_family, flags | NLM_F_MULTI, cmd);
  6209. if (!hdr) {
  6210. err = -EMSGSIZE;
  6211. goto nla_put_failure;
  6212. }
  6213. err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
  6214. if (!err)
  6215. last = true;
  6216. else if (err != -EMSGSIZE || tmp_index == index)
  6217. goto nla_put_failure;
  6218. genlmsg_end(skb, hdr);
  6219. err = genlmsg_reply(skb, info);
  6220. if (err)
  6221. return err;
  6222. }
  6223. skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
  6224. if (!skb)
  6225. return -ENOMEM;
  6226. nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
  6227. NLMSG_DONE, 0, flags | NLM_F_MULTI);
  6228. if (!nlh) {
  6229. err = -EMSGSIZE;
  6230. goto nla_put_failure;
  6231. }
  6232. return genlmsg_reply(skb, info);
  6233. nla_put_failure:
  6234. nlmsg_free(skb);
  6235. return err;
  6236. }
  6237. static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb,
  6238. struct netlink_callback *cb,
  6239. enum devlink_command cmd)
  6240. {
  6241. int index = cb->args[0];
  6242. int tmp_index = index;
  6243. void *hdr;
  6244. int err;
  6245. hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  6246. &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, cmd);
  6247. if (!hdr) {
  6248. err = -EMSGSIZE;
  6249. goto nla_put_failure;
  6250. }
  6251. err = devlink_fmsg_prepare_skb(fmsg, skb, &index);
  6252. if ((err && err != -EMSGSIZE) || tmp_index == index)
  6253. goto nla_put_failure;
  6254. cb->args[0] = index;
  6255. genlmsg_end(skb, hdr);
  6256. return skb->len;
  6257. nla_put_failure:
  6258. genlmsg_cancel(skb, hdr);
  6259. return err;
  6260. }
  6261. struct devlink_health_reporter {
  6262. struct list_head list;
  6263. void *priv;
  6264. const struct devlink_health_reporter_ops *ops;
  6265. struct devlink *devlink;
  6266. struct devlink_port *devlink_port;
  6267. struct devlink_fmsg *dump_fmsg;
  6268. struct mutex dump_lock; /* lock parallel read/write from dump buffers */
  6269. u64 graceful_period;
  6270. bool auto_recover;
  6271. bool auto_dump;
  6272. u8 health_state;
  6273. u64 dump_ts;
  6274. u64 dump_real_ts;
  6275. u64 error_count;
  6276. u64 recovery_count;
  6277. u64 last_recovery_ts;
  6278. refcount_t refcount;
  6279. };
  6280. void *
  6281. devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
  6282. {
  6283. return reporter->priv;
  6284. }
  6285. EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
  6286. static struct devlink_health_reporter *
  6287. __devlink_health_reporter_find_by_name(struct list_head *reporter_list,
  6288. struct mutex *list_lock,
  6289. const char *reporter_name)
  6290. {
  6291. struct devlink_health_reporter *reporter;
  6292. lockdep_assert_held(list_lock);
  6293. list_for_each_entry(reporter, reporter_list, list)
  6294. if (!strcmp(reporter->ops->name, reporter_name))
  6295. return reporter;
  6296. return NULL;
  6297. }
  6298. static struct devlink_health_reporter *
  6299. devlink_health_reporter_find_by_name(struct devlink *devlink,
  6300. const char *reporter_name)
  6301. {
  6302. return __devlink_health_reporter_find_by_name(&devlink->reporter_list,
  6303. &devlink->reporters_lock,
  6304. reporter_name);
  6305. }
  6306. static struct devlink_health_reporter *
  6307. devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port,
  6308. const char *reporter_name)
  6309. {
  6310. return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list,
  6311. &devlink_port->reporters_lock,
  6312. reporter_name);
  6313. }
  6314. static struct devlink_health_reporter *
  6315. __devlink_health_reporter_create(struct devlink *devlink,
  6316. const struct devlink_health_reporter_ops *ops,
  6317. u64 graceful_period, void *priv)
  6318. {
  6319. struct devlink_health_reporter *reporter;
  6320. if (WARN_ON(graceful_period && !ops->recover))
  6321. return ERR_PTR(-EINVAL);
  6322. reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
  6323. if (!reporter)
  6324. return ERR_PTR(-ENOMEM);
  6325. reporter->priv = priv;
  6326. reporter->ops = ops;
  6327. reporter->devlink = devlink;
  6328. reporter->graceful_period = graceful_period;
  6329. reporter->auto_recover = !!ops->recover;
  6330. reporter->auto_dump = !!ops->dump;
  6331. mutex_init(&reporter->dump_lock);
  6332. refcount_set(&reporter->refcount, 1);
  6333. return reporter;
  6334. }
  6335. /**
  6336. * devlink_port_health_reporter_create - create devlink health reporter for
  6337. * specified port instance
  6338. *
  6339. * @port: devlink_port which should contain the new reporter
6340. * @ops: devlink health reporter ops
6341. * @graceful_period: minimum time (in msecs) between automatic recoveries, to avoid recovery loops
6342. * @priv: driver private data associated with the reporter
  6343. */
  6344. struct devlink_health_reporter *
  6345. devlink_port_health_reporter_create(struct devlink_port *port,
  6346. const struct devlink_health_reporter_ops *ops,
  6347. u64 graceful_period, void *priv)
  6348. {
  6349. struct devlink_health_reporter *reporter;
  6350. mutex_lock(&port->reporters_lock);
  6351. if (__devlink_health_reporter_find_by_name(&port->reporter_list,
  6352. &port->reporters_lock, ops->name)) {
  6353. reporter = ERR_PTR(-EEXIST);
  6354. goto unlock;
  6355. }
  6356. reporter = __devlink_health_reporter_create(port->devlink, ops,
  6357. graceful_period, priv);
  6358. if (IS_ERR(reporter))
  6359. goto unlock;
  6360. reporter->devlink_port = port;
  6361. list_add_tail(&reporter->list, &port->reporter_list);
  6362. unlock:
  6363. mutex_unlock(&port->reporters_lock);
  6364. return reporter;
  6365. }
  6366. EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create);
  6367. /**
  6368. * devlink_health_reporter_create - create devlink health reporter
  6369. *
6370. * @devlink: devlink instance
6371. * @ops: devlink health reporter ops
6372. * @graceful_period: minimum time (in msecs) between automatic recoveries, to avoid recovery loops
6373. * @priv: driver private data associated with the reporter
  6374. */
  6375. struct devlink_health_reporter *
  6376. devlink_health_reporter_create(struct devlink *devlink,
  6377. const struct devlink_health_reporter_ops *ops,
  6378. u64 graceful_period, void *priv)
  6379. {
  6380. struct devlink_health_reporter *reporter;
  6381. mutex_lock(&devlink->reporters_lock);
  6382. if (devlink_health_reporter_find_by_name(devlink, ops->name)) {
  6383. reporter = ERR_PTR(-EEXIST);
  6384. goto unlock;
  6385. }
  6386. reporter = __devlink_health_reporter_create(devlink, ops,
  6387. graceful_period, priv);
  6388. if (IS_ERR(reporter))
  6389. goto unlock;
  6390. list_add_tail(&reporter->list, &devlink->reporter_list);
  6391. unlock:
  6392. mutex_unlock(&devlink->reporters_lock);
  6393. return reporter;
  6394. }
  6395. EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
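/* Illustrative sketch (not part of this file): registering a reporter with the
 * create helper above and raising an error through it.  With a non-zero
 * graceful_period and a ->recover() op, auto-recovery is attempted unless a
 * previous recovery happened less than graceful_period msecs ago.  The
 * hypothetical_foo_* names are made up; the .diagnose op reuses the
 * hypothetical_foo_diagnose() sketch shown earlier, and devlink_health_report()
 * is the public entry point drivers call when they detect the error.
 *
 *	static int
 *	hypothetical_foo_fw_recover(struct devlink_health_reporter *reporter,
 *				    void *priv_ctx, struct netlink_ext_ack *extack)
 *	{
 *		struct hypothetical_foo *foo = devlink_health_reporter_priv(reporter);
 *
 *		return hypothetical_foo_fw_reset(foo);
 *	}
 *
 *	static const struct devlink_health_reporter_ops hypothetical_foo_fw_ops = {
 *		.name = "fw",
 *		.recover = hypothetical_foo_fw_recover,
 *		.diagnose = hypothetical_foo_diagnose,
 *	};
 *
 *	static int hypothetical_foo_health_init(struct hypothetical_foo *foo,
 *						struct devlink *devlink)
 *	{
 *		foo->fw_reporter = devlink_health_reporter_create(devlink,
 *								  &hypothetical_foo_fw_ops,
 *								  3000, foo);
 *		return PTR_ERR_OR_ZERO(foo->fw_reporter);
 *	}
 *
 * On a detected failure the driver would then call
 * devlink_health_report(foo->fw_reporter, "FW fatal error", &err_ctx).
 */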
  6396. static void
  6397. devlink_health_reporter_free(struct devlink_health_reporter *reporter)
  6398. {
  6399. mutex_destroy(&reporter->dump_lock);
  6400. if (reporter->dump_fmsg)
  6401. devlink_fmsg_free(reporter->dump_fmsg);
  6402. kfree(reporter);
  6403. }
  6404. static void
  6405. devlink_health_reporter_put(struct devlink_health_reporter *reporter)
  6406. {
  6407. if (refcount_dec_and_test(&reporter->refcount))
  6408. devlink_health_reporter_free(reporter);
  6409. }
  6410. static void
  6411. __devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
  6412. {
  6413. list_del(&reporter->list);
  6414. devlink_health_reporter_put(reporter);
  6415. }
  6416. /**
  6417. * devlink_health_reporter_destroy - destroy devlink health reporter
  6418. *
  6419. * @reporter: devlink health reporter to destroy
  6420. */
  6421. void
  6422. devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
  6423. {
  6424. struct mutex *lock = &reporter->devlink->reporters_lock;
  6425. mutex_lock(lock);
  6426. __devlink_health_reporter_destroy(reporter);
  6427. mutex_unlock(lock);
  6428. }
  6429. EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
  6430. /**
  6431. * devlink_port_health_reporter_destroy - destroy devlink port health reporter
  6432. *
  6433. * @reporter: devlink health reporter to destroy
  6434. */
  6435. void
  6436. devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter)
  6437. {
  6438. struct mutex *lock = &reporter->devlink_port->reporters_lock;
  6439. mutex_lock(lock);
  6440. __devlink_health_reporter_destroy(reporter);
  6441. mutex_unlock(lock);
  6442. }
  6443. EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy);
  6444. static int
  6445. devlink_nl_health_reporter_fill(struct sk_buff *msg,
  6446. struct devlink_health_reporter *reporter,
  6447. enum devlink_command cmd, u32 portid,
  6448. u32 seq, int flags)
  6449. {
  6450. struct devlink *devlink = reporter->devlink;
  6451. struct nlattr *reporter_attr;
  6452. void *hdr;
  6453. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  6454. if (!hdr)
  6455. return -EMSGSIZE;
  6456. if (devlink_nl_put_handle(msg, devlink))
  6457. goto genlmsg_cancel;
  6458. if (reporter->devlink_port) {
  6459. if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index))
  6460. goto genlmsg_cancel;
  6461. }
  6462. reporter_attr = nla_nest_start_noflag(msg,
  6463. DEVLINK_ATTR_HEALTH_REPORTER);
  6464. if (!reporter_attr)
  6465. goto genlmsg_cancel;
  6466. if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME,
  6467. reporter->ops->name))
  6468. goto reporter_nest_cancel;
  6469. if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE,
  6470. reporter->health_state))
  6471. goto reporter_nest_cancel;
  6472. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT,
  6473. reporter->error_count, DEVLINK_ATTR_PAD))
  6474. goto reporter_nest_cancel;
  6475. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT,
  6476. reporter->recovery_count, DEVLINK_ATTR_PAD))
  6477. goto reporter_nest_cancel;
  6478. if (reporter->ops->recover &&
  6479. nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD,
  6480. reporter->graceful_period,
  6481. DEVLINK_ATTR_PAD))
  6482. goto reporter_nest_cancel;
  6483. if (reporter->ops->recover &&
  6484. nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
  6485. reporter->auto_recover))
  6486. goto reporter_nest_cancel;
  6487. if (reporter->dump_fmsg &&
  6488. nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS,
  6489. jiffies_to_msecs(reporter->dump_ts),
  6490. DEVLINK_ATTR_PAD))
  6491. goto reporter_nest_cancel;
  6492. if (reporter->dump_fmsg &&
  6493. nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
  6494. reporter->dump_real_ts, DEVLINK_ATTR_PAD))
  6495. goto reporter_nest_cancel;
  6496. if (reporter->ops->dump &&
  6497. nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
  6498. reporter->auto_dump))
  6499. goto reporter_nest_cancel;
  6500. nla_nest_end(msg, reporter_attr);
  6501. genlmsg_end(msg, hdr);
  6502. return 0;
  6503. reporter_nest_cancel:
  6504. nla_nest_end(msg, reporter_attr);
  6505. genlmsg_cancel:
  6506. genlmsg_cancel(msg, hdr);
  6507. return -EMSGSIZE;
  6508. }
  6509. static void devlink_recover_notify(struct devlink_health_reporter *reporter,
  6510. enum devlink_command cmd)
  6511. {
  6512. struct devlink *devlink = reporter->devlink;
  6513. struct sk_buff *msg;
  6514. int err;
  6515. WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
  6516. WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED));
  6517. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  6518. if (!msg)
  6519. return;
  6520. err = devlink_nl_health_reporter_fill(msg, reporter, cmd, 0, 0, 0);
  6521. if (err) {
  6522. nlmsg_free(msg);
  6523. return;
  6524. }
  6525. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
  6526. 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  6527. }
  6528. void
  6529. devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter)
  6530. {
  6531. reporter->recovery_count++;
  6532. reporter->last_recovery_ts = jiffies;
  6533. }
  6534. EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done);
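/* Example (sketch): devlink_health_reporter_recovery_done() is aimed at
 * drivers whose recovery completes outside the ->recover() callback, for
 * instance from an asynchronous firmware event. A hypothetical flow:
 *
 *	static void my_fw_reset_done_event(struct my_dev *mdev)
 *	{
 *		...
 *		devlink_health_reporter_recovery_done(mdev->fw_reporter);
 *	}
 *
 * Calling it bumps recovery_count and refreshes last_recovery_ts, which the
 * graceful-period check in devlink_health_report() relies on.
 */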
  6535. static int
  6536. devlink_health_reporter_recover(struct devlink_health_reporter *reporter,
  6537. void *priv_ctx, struct netlink_ext_ack *extack)
  6538. {
  6539. int err;
  6540. if (reporter->health_state == DEVLINK_HEALTH_REPORTER_STATE_HEALTHY)
  6541. return 0;
  6542. if (!reporter->ops->recover)
  6543. return -EOPNOTSUPP;
  6544. err = reporter->ops->recover(reporter, priv_ctx, extack);
  6545. if (err)
  6546. return err;
  6547. devlink_health_reporter_recovery_done(reporter);
  6548. reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
  6549. devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
  6550. return 0;
  6551. }
  6552. static void
  6553. devlink_health_dump_clear(struct devlink_health_reporter *reporter)
  6554. {
  6555. if (!reporter->dump_fmsg)
  6556. return;
  6557. devlink_fmsg_free(reporter->dump_fmsg);
  6558. reporter->dump_fmsg = NULL;
  6559. }
  6560. static int devlink_health_do_dump(struct devlink_health_reporter *reporter,
  6561. void *priv_ctx,
  6562. struct netlink_ext_ack *extack)
  6563. {
  6564. int err;
  6565. if (!reporter->ops->dump)
  6566. return 0;
  6567. if (reporter->dump_fmsg)
  6568. return 0;
  6569. reporter->dump_fmsg = devlink_fmsg_alloc();
  6570. if (!reporter->dump_fmsg) {
  6571. err = -ENOMEM;
  6572. return err;
  6573. }
  6574. err = devlink_fmsg_obj_nest_start(reporter->dump_fmsg);
  6575. if (err)
  6576. goto dump_err;
  6577. err = reporter->ops->dump(reporter, reporter->dump_fmsg,
  6578. priv_ctx, extack);
  6579. if (err)
  6580. goto dump_err;
  6581. err = devlink_fmsg_obj_nest_end(reporter->dump_fmsg);
  6582. if (err)
  6583. goto dump_err;
  6584. reporter->dump_ts = jiffies;
  6585. reporter->dump_real_ts = ktime_get_real_ns();
  6586. return 0;
  6587. dump_err:
  6588. devlink_health_dump_clear(reporter);
  6589. return err;
  6590. }
  6591. int devlink_health_report(struct devlink_health_reporter *reporter,
  6592. const char *msg, void *priv_ctx)
  6593. {
  6594. enum devlink_health_reporter_state prev_health_state;
  6595. struct devlink *devlink = reporter->devlink;
  6596. unsigned long recover_ts_threshold;
  6597. int ret;
  6598. /* write a log message of the current error */
  6599. WARN_ON(!msg);
  6600. trace_devlink_health_report(devlink, reporter->ops->name, msg);
  6601. reporter->error_count++;
  6602. prev_health_state = reporter->health_state;
  6603. reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
  6604. devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
  6605. /* abort if the previous error wasn't recovered */
  6606. recover_ts_threshold = reporter->last_recovery_ts +
  6607. msecs_to_jiffies(reporter->graceful_period);
  6608. if (reporter->auto_recover &&
  6609. (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
  6610. (reporter->last_recovery_ts && reporter->recovery_count &&
  6611. time_is_after_jiffies(recover_ts_threshold)))) {
  6612. trace_devlink_health_recover_aborted(devlink,
  6613. reporter->ops->name,
  6614. reporter->health_state,
  6615. jiffies -
  6616. reporter->last_recovery_ts);
  6617. return -ECANCELED;
  6618. }
  6619. reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
  6620. if (reporter->auto_dump) {
  6621. mutex_lock(&reporter->dump_lock);
  6622. /* store current dump of current error, for later analysis */
  6623. devlink_health_do_dump(reporter, priv_ctx, NULL);
  6624. mutex_unlock(&reporter->dump_lock);
  6625. }
  6626. if (!reporter->auto_recover)
  6627. return 0;
  6628. devl_lock(devlink);
  6629. ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL);
  6630. devl_unlock(devlink);
  6631. return ret;
  6632. }
  6633. EXPORT_SYMBOL_GPL(devlink_health_report);
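/* Example (sketch): how a driver event handler might feed an error into the
 * health machinery. my_dev, my_err_ctx and the reporter pointer are
 * hypothetical driver state; devlink_health_report() then takes care of the
 * notification, the optional auto dump and the optional auto recovery as
 * implemented above:
 *
 *	struct my_err_ctx ctx = { .syndrome = syndrome };
 *
 *	devlink_health_report(my_dev->fw_reporter,
 *			      "FW fatal error reported", &ctx);
 *
 * The priv_ctx pointer is passed through unchanged to the reporter's dump()
 * and recover() callbacks, so it only needs to stay valid for the duration
 * of the call.
 */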
  6634. static struct devlink_health_reporter *
  6635. devlink_health_reporter_get_from_attrs(struct devlink *devlink,
  6636. struct nlattr **attrs)
  6637. {
  6638. struct devlink_health_reporter *reporter;
  6639. struct devlink_port *devlink_port;
  6640. char *reporter_name;
  6641. if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
  6642. return NULL;
  6643. reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
  6644. devlink_port = devlink_port_get_from_attrs(devlink, attrs);
  6645. if (IS_ERR(devlink_port)) {
  6646. mutex_lock(&devlink->reporters_lock);
  6647. reporter = devlink_health_reporter_find_by_name(devlink, reporter_name);
  6648. if (reporter)
  6649. refcount_inc(&reporter->refcount);
  6650. mutex_unlock(&devlink->reporters_lock);
  6651. } else {
  6652. mutex_lock(&devlink_port->reporters_lock);
  6653. reporter = devlink_port_health_reporter_find_by_name(devlink_port, reporter_name);
  6654. if (reporter)
  6655. refcount_inc(&reporter->refcount);
  6656. mutex_unlock(&devlink_port->reporters_lock);
  6657. }
  6658. return reporter;
  6659. }
  6660. static struct devlink_health_reporter *
  6661. devlink_health_reporter_get_from_info(struct devlink *devlink,
  6662. struct genl_info *info)
  6663. {
  6664. return devlink_health_reporter_get_from_attrs(devlink, info->attrs);
  6665. }
  6666. static struct devlink_health_reporter *
  6667. devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
  6668. {
  6669. const struct genl_dumpit_info *info = genl_dumpit_info(cb);
  6670. struct devlink_health_reporter *reporter;
  6671. struct nlattr **attrs = info->attrs;
  6672. struct devlink *devlink;
  6673. devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
  6674. if (IS_ERR(devlink))
  6675. return NULL;
  6676. reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
  6677. devlink_put(devlink);
  6678. return reporter;
  6679. }
  6680. void
  6681. devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
  6682. enum devlink_health_reporter_state state)
  6683. {
  6684. if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY &&
  6685. state != DEVLINK_HEALTH_REPORTER_STATE_ERROR))
  6686. return;
  6687. if (reporter->health_state == state)
  6688. return;
  6689. reporter->health_state = state;
  6690. trace_devlink_health_reporter_state_update(reporter->devlink,
  6691. reporter->ops->name, state);
  6692. devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
  6693. }
  6694. EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
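/* Example (sketch): a driver that detects that a previously reported error
 * has cleared on its own (e.g. after a link flap) can move the reporter back
 * to healthy without going through recover(). priv->rx_reporter is a
 * hypothetical reporter handle:
 *
 *	devlink_health_reporter_state_update(priv->rx_reporter,
 *			DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
 *
 * The call also emits the DEVLINK_CMD_HEALTH_REPORTER_RECOVER notification
 * seen above, so userspace observers stay in sync.
 */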
  6695. static int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
  6696. struct genl_info *info)
  6697. {
  6698. struct devlink *devlink = info->user_ptr[0];
  6699. struct devlink_health_reporter *reporter;
  6700. struct sk_buff *msg;
  6701. int err;
  6702. reporter = devlink_health_reporter_get_from_info(devlink, info);
  6703. if (!reporter)
  6704. return -EINVAL;
  6705. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  6706. if (!msg) {
  6707. err = -ENOMEM;
  6708. goto out;
  6709. }
  6710. err = devlink_nl_health_reporter_fill(msg, reporter,
  6711. DEVLINK_CMD_HEALTH_REPORTER_GET,
  6712. info->snd_portid, info->snd_seq,
  6713. 0);
  6714. if (err) {
  6715. nlmsg_free(msg);
  6716. goto out;
  6717. }
  6718. err = genlmsg_reply(msg, info);
  6719. out:
  6720. devlink_health_reporter_put(reporter);
  6721. return err;
  6722. }
  6723. static int
  6724. devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
  6725. struct netlink_callback *cb)
  6726. {
  6727. struct devlink_health_reporter *reporter;
  6728. struct devlink_port *port;
  6729. struct devlink *devlink;
  6730. int start = cb->args[0];
  6731. unsigned long index;
  6732. int idx = 0;
  6733. int err;
  6734. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  6735. mutex_lock(&devlink->reporters_lock);
  6736. list_for_each_entry(reporter, &devlink->reporter_list,
  6737. list) {
  6738. if (idx < start) {
  6739. idx++;
  6740. continue;
  6741. }
  6742. err = devlink_nl_health_reporter_fill(
  6743. msg, reporter, DEVLINK_CMD_HEALTH_REPORTER_GET,
  6744. NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
  6745. NLM_F_MULTI);
  6746. if (err) {
  6747. mutex_unlock(&devlink->reporters_lock);
  6748. devlink_put(devlink);
  6749. goto out;
  6750. }
  6751. idx++;
  6752. }
  6753. mutex_unlock(&devlink->reporters_lock);
  6754. devlink_put(devlink);
  6755. }
  6756. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  6757. devl_lock(devlink);
  6758. list_for_each_entry(port, &devlink->port_list, list) {
  6759. mutex_lock(&port->reporters_lock);
  6760. list_for_each_entry(reporter, &port->reporter_list, list) {
  6761. if (idx < start) {
  6762. idx++;
  6763. continue;
  6764. }
  6765. err = devlink_nl_health_reporter_fill(
  6766. msg, reporter,
  6767. DEVLINK_CMD_HEALTH_REPORTER_GET,
  6768. NETLINK_CB(cb->skb).portid,
  6769. cb->nlh->nlmsg_seq, NLM_F_MULTI);
  6770. if (err) {
  6771. mutex_unlock(&port->reporters_lock);
  6772. devl_unlock(devlink);
  6773. devlink_put(devlink);
  6774. goto out;
  6775. }
  6776. idx++;
  6777. }
  6778. mutex_unlock(&port->reporters_lock);
  6779. }
  6780. devl_unlock(devlink);
  6781. devlink_put(devlink);
  6782. }
  6783. out:
  6784. cb->args[0] = idx;
  6785. return msg->len;
  6786. }
  6787. static int
  6788. devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
  6789. struct genl_info *info)
  6790. {
  6791. struct devlink *devlink = info->user_ptr[0];
  6792. struct devlink_health_reporter *reporter;
  6793. int err;
  6794. reporter = devlink_health_reporter_get_from_info(devlink, info);
  6795. if (!reporter)
  6796. return -EINVAL;
  6797. if (!reporter->ops->recover &&
  6798. (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
  6799. info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])) {
  6800. err = -EOPNOTSUPP;
  6801. goto out;
  6802. }
  6803. if (!reporter->ops->dump &&
  6804. info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]) {
  6805. err = -EOPNOTSUPP;
  6806. goto out;
  6807. }
  6808. if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
  6809. reporter->graceful_period =
  6810. nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
  6811. if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
  6812. reporter->auto_recover =
  6813. nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
  6814. if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
  6815. reporter->auto_dump =
  6816. nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]);
  6817. devlink_health_reporter_put(reporter);
  6818. return 0;
  6819. out:
  6820. devlink_health_reporter_put(reporter);
  6821. return err;
  6822. }
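/* From userspace these knobs map to the iproute2 devlink tool, roughly
 * (device handle and reporter name are examples):
 *
 *	devlink health set pci/0000:01:00.0 reporter fw \
 *		grace_period 3500 auto_recover true auto_dump true
 *
 * Only attributes the reporter's ops actually support (recover/dump) are
 * accepted, as checked above.
 */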
  6823. static int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
  6824. struct genl_info *info)
  6825. {
  6826. struct devlink *devlink = info->user_ptr[0];
  6827. struct devlink_health_reporter *reporter;
  6828. int err;
  6829. reporter = devlink_health_reporter_get_from_info(devlink, info);
  6830. if (!reporter)
  6831. return -EINVAL;
  6832. err = devlink_health_reporter_recover(reporter, NULL, info->extack);
  6833. devlink_health_reporter_put(reporter);
  6834. return err;
  6835. }
  6836. static int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
  6837. struct genl_info *info)
  6838. {
  6839. struct devlink *devlink = info->user_ptr[0];
  6840. struct devlink_health_reporter *reporter;
  6841. struct devlink_fmsg *fmsg;
  6842. int err;
  6843. reporter = devlink_health_reporter_get_from_info(devlink, info);
  6844. if (!reporter)
  6845. return -EINVAL;
  6846. if (!reporter->ops->diagnose) {
  6847. devlink_health_reporter_put(reporter);
  6848. return -EOPNOTSUPP;
  6849. }
  6850. fmsg = devlink_fmsg_alloc();
  6851. if (!fmsg) {
  6852. devlink_health_reporter_put(reporter);
  6853. return -ENOMEM;
  6854. }
  6855. err = devlink_fmsg_obj_nest_start(fmsg);
  6856. if (err)
  6857. goto out;
  6858. err = reporter->ops->diagnose(reporter, fmsg, info->extack);
  6859. if (err)
  6860. goto out;
  6861. err = devlink_fmsg_obj_nest_end(fmsg);
  6862. if (err)
  6863. goto out;
  6864. err = devlink_fmsg_snd(fmsg, info,
  6865. DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0);
  6866. out:
  6867. devlink_fmsg_free(fmsg);
  6868. devlink_health_reporter_put(reporter);
  6869. return err;
  6870. }
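/* Example (sketch): the core wraps the diagnose callback in an fmsg object
 * nest (see above), so a driver callback only fills in name/value pairs.
 * The field names and my_* types are hypothetical, and the pair helpers are
 * assumed to be the usual devlink_fmsg_*_pair_put() exports:
 *
 *	static int my_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
 *					   struct devlink_fmsg *fmsg,
 *					   struct netlink_ext_ack *extack)
 *	{
 *		struct my_dev *mdev = devlink_health_reporter_priv(reporter);
 *		int err;
 *
 *		err = devlink_fmsg_u32_pair_put(fmsg, "syndrome", mdev->syndrome);
 *		if (err)
 *			return err;
 *		return devlink_fmsg_string_pair_put(fmsg, "state", "ready");
 *	}
 */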
  6871. static int
  6872. devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
  6873. struct netlink_callback *cb)
  6874. {
  6875. struct devlink_health_reporter *reporter;
  6876. u64 start = cb->args[0];
  6877. int err;
  6878. reporter = devlink_health_reporter_get_from_cb(cb);
  6879. if (!reporter)
  6880. return -EINVAL;
  6881. if (!reporter->ops->dump) {
  6882. err = -EOPNOTSUPP;
  6883. goto out;
  6884. }
  6885. mutex_lock(&reporter->dump_lock);
  6886. if (!start) {
  6887. err = devlink_health_do_dump(reporter, NULL, cb->extack);
  6888. if (err)
  6889. goto unlock;
  6890. cb->args[1] = reporter->dump_ts;
  6891. }
  6892. if (!reporter->dump_fmsg || cb->args[1] != reporter->dump_ts) {
  6893. NL_SET_ERR_MSG_MOD(cb->extack, "Dump trampled, please retry");
  6894. err = -EAGAIN;
  6895. goto unlock;
  6896. }
  6897. err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
  6898. DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
  6899. unlock:
  6900. mutex_unlock(&reporter->dump_lock);
  6901. out:
  6902. devlink_health_reporter_put(reporter);
  6903. return err;
  6904. }
  6905. static int
  6906. devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
  6907. struct genl_info *info)
  6908. {
  6909. struct devlink *devlink = info->user_ptr[0];
  6910. struct devlink_health_reporter *reporter;
  6911. reporter = devlink_health_reporter_get_from_info(devlink, info);
  6912. if (!reporter)
  6913. return -EINVAL;
  6914. if (!reporter->ops->dump) {
  6915. devlink_health_reporter_put(reporter);
  6916. return -EOPNOTSUPP;
  6917. }
  6918. mutex_lock(&reporter->dump_lock);
  6919. devlink_health_dump_clear(reporter);
  6920. mutex_unlock(&reporter->dump_lock);
  6921. devlink_health_reporter_put(reporter);
  6922. return 0;
  6923. }
  6924. static int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
  6925. struct genl_info *info)
  6926. {
  6927. struct devlink *devlink = info->user_ptr[0];
  6928. struct devlink_health_reporter *reporter;
  6929. int err;
  6930. reporter = devlink_health_reporter_get_from_info(devlink, info);
  6931. if (!reporter)
  6932. return -EINVAL;
  6933. if (!reporter->ops->test) {
  6934. devlink_health_reporter_put(reporter);
  6935. return -EOPNOTSUPP;
  6936. }
  6937. err = reporter->ops->test(reporter, info->extack);
  6938. devlink_health_reporter_put(reporter);
  6939. return err;
  6940. }
  6941. struct devlink_stats {
  6942. u64_stats_t rx_bytes;
  6943. u64_stats_t rx_packets;
  6944. struct u64_stats_sync syncp;
  6945. };
  6946. /**
  6947. * struct devlink_trap_policer_item - Packet trap policer attributes.
  6948. * @policer: Immutable packet trap policer attributes.
  6949. * @rate: Rate in packets / sec.
  6950. * @burst: Burst size in packets.
  6951. * @list: trap_policer_list member.
  6952. *
  6953. * Describes packet trap policer attributes. Created by devlink during trap
  6954. * policer registration.
  6955. */
  6956. struct devlink_trap_policer_item {
  6957. const struct devlink_trap_policer *policer;
  6958. u64 rate;
  6959. u64 burst;
  6960. struct list_head list;
  6961. };
  6962. /**
  6963. * struct devlink_trap_group_item - Packet trap group attributes.
  6964. * @group: Immutable packet trap group attributes.
  6965. * @policer_item: Associated policer item. Can be NULL.
  6966. * @list: trap_group_list member.
  6967. * @stats: Trap group statistics.
  6968. *
  6969. * Describes packet trap group attributes. Created by devlink during trap
  6970. * group registration.
  6971. */
  6972. struct devlink_trap_group_item {
  6973. const struct devlink_trap_group *group;
  6974. struct devlink_trap_policer_item *policer_item;
  6975. struct list_head list;
  6976. struct devlink_stats __percpu *stats;
  6977. };
  6978. /**
  6979. * struct devlink_trap_item - Packet trap attributes.
  6980. * @trap: Immutable packet trap attributes.
  6981. * @group_item: Associated group item.
  6982. * @list: trap_list member.
  6983. * @action: Trap action.
  6984. * @stats: Trap statistics.
  6985. * @priv: Driver private information.
  6986. *
  6987. * Describes both mutable and immutable packet trap attributes. Created by
  6988. * devlink during trap registration and used for all trap related operations.
  6989. */
  6990. struct devlink_trap_item {
  6991. const struct devlink_trap *trap;
  6992. struct devlink_trap_group_item *group_item;
  6993. struct list_head list;
  6994. enum devlink_trap_action action;
  6995. struct devlink_stats __percpu *stats;
  6996. void *priv;
  6997. };
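/* These three item structs are the devlink-side bookkeeping for objects a
 * driver registers in bulk, typically at probe time. A rough sketch, with
 * hypothetical my_* arrays (the registration helpers are assumed to be the
 * usual devlink_trap*_register() exports):
 *
 *	err = devlink_trap_groups_register(devlink, my_trap_groups,
 *					   ARRAY_SIZE(my_trap_groups));
 *	if (err)
 *		return err;
 *	err = devlink_traps_register(devlink, my_traps,
 *				     ARRAY_SIZE(my_traps), priv);
 */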
  6998. static struct devlink_trap_policer_item *
  6999. devlink_trap_policer_item_lookup(struct devlink *devlink, u32 id)
  7000. {
  7001. struct devlink_trap_policer_item *policer_item;
  7002. list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
  7003. if (policer_item->policer->id == id)
  7004. return policer_item;
  7005. }
  7006. return NULL;
  7007. }
  7008. static struct devlink_trap_item *
  7009. devlink_trap_item_lookup(struct devlink *devlink, const char *name)
  7010. {
  7011. struct devlink_trap_item *trap_item;
  7012. list_for_each_entry(trap_item, &devlink->trap_list, list) {
  7013. if (!strcmp(trap_item->trap->name, name))
  7014. return trap_item;
  7015. }
  7016. return NULL;
  7017. }
  7018. static struct devlink_trap_item *
  7019. devlink_trap_item_get_from_info(struct devlink *devlink,
  7020. struct genl_info *info)
  7021. {
  7022. struct nlattr *attr;
  7023. if (!info->attrs[DEVLINK_ATTR_TRAP_NAME])
  7024. return NULL;
  7025. attr = info->attrs[DEVLINK_ATTR_TRAP_NAME];
  7026. return devlink_trap_item_lookup(devlink, nla_data(attr));
  7027. }
  7028. static int
  7029. devlink_trap_action_get_from_info(struct genl_info *info,
  7030. enum devlink_trap_action *p_trap_action)
  7031. {
  7032. u8 val;
  7033. val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
  7034. switch (val) {
  7035. case DEVLINK_TRAP_ACTION_DROP:
  7036. case DEVLINK_TRAP_ACTION_TRAP:
  7037. case DEVLINK_TRAP_ACTION_MIRROR:
  7038. *p_trap_action = val;
  7039. break;
  7040. default:
  7041. return -EINVAL;
  7042. }
  7043. return 0;
  7044. }
  7045. static int devlink_trap_metadata_put(struct sk_buff *msg,
  7046. const struct devlink_trap *trap)
  7047. {
  7048. struct nlattr *attr;
  7049. attr = nla_nest_start(msg, DEVLINK_ATTR_TRAP_METADATA);
  7050. if (!attr)
  7051. return -EMSGSIZE;
  7052. if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) &&
  7053. nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT))
  7054. goto nla_put_failure;
  7055. if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) &&
  7056. nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE))
  7057. goto nla_put_failure;
  7058. nla_nest_end(msg, attr);
  7059. return 0;
  7060. nla_put_failure:
  7061. nla_nest_cancel(msg, attr);
  7062. return -EMSGSIZE;
  7063. }
  7064. static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
  7065. struct devlink_stats *stats)
  7066. {
  7067. int i;
  7068. memset(stats, 0, sizeof(*stats));
  7069. for_each_possible_cpu(i) {
  7070. struct devlink_stats *cpu_stats;
  7071. u64 rx_packets, rx_bytes;
  7072. unsigned int start;
  7073. cpu_stats = per_cpu_ptr(trap_stats, i);
  7074. do {
  7075. start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
  7076. rx_packets = u64_stats_read(&cpu_stats->rx_packets);
  7077. rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
  7078. } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
  7079. u64_stats_add(&stats->rx_packets, rx_packets);
  7080. u64_stats_add(&stats->rx_bytes, rx_bytes);
  7081. }
  7082. }
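/* For reference, the writer side of these per-CPU counters (the trap report
 * path elsewhere in devlink) follows the usual u64_stats pattern, roughly:
 *
 *	stats = this_cpu_ptr(trap_stats);
 *	u64_stats_update_begin(&stats->syncp);
 *	u64_stats_inc(&stats->rx_packets);
 *	u64_stats_add(&stats->rx_bytes, skb->len);
 *	u64_stats_update_end(&stats->syncp);
 *
 * which is what makes the lockless fetch_begin/fetch_retry loop above safe.
 */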
  7083. static int
  7084. devlink_trap_group_stats_put(struct sk_buff *msg,
  7085. struct devlink_stats __percpu *trap_stats)
  7086. {
  7087. struct devlink_stats stats;
  7088. struct nlattr *attr;
  7089. devlink_trap_stats_read(trap_stats, &stats);
  7090. attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
  7091. if (!attr)
  7092. return -EMSGSIZE;
  7093. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
  7094. u64_stats_read(&stats.rx_packets),
  7095. DEVLINK_ATTR_PAD))
  7096. goto nla_put_failure;
  7097. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
  7098. u64_stats_read(&stats.rx_bytes),
  7099. DEVLINK_ATTR_PAD))
  7100. goto nla_put_failure;
  7101. nla_nest_end(msg, attr);
  7102. return 0;
  7103. nla_put_failure:
  7104. nla_nest_cancel(msg, attr);
  7105. return -EMSGSIZE;
  7106. }
  7107. static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
  7108. const struct devlink_trap_item *trap_item)
  7109. {
  7110. struct devlink_stats stats;
  7111. struct nlattr *attr;
  7112. u64 drops = 0;
  7113. int err;
  7114. if (devlink->ops->trap_drop_counter_get) {
  7115. err = devlink->ops->trap_drop_counter_get(devlink,
  7116. trap_item->trap,
  7117. &drops);
  7118. if (err)
  7119. return err;
  7120. }
  7121. devlink_trap_stats_read(trap_item->stats, &stats);
  7122. attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
  7123. if (!attr)
  7124. return -EMSGSIZE;
  7125. if (devlink->ops->trap_drop_counter_get &&
  7126. nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
  7127. DEVLINK_ATTR_PAD))
  7128. goto nla_put_failure;
  7129. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
  7130. u64_stats_read(&stats.rx_packets),
  7131. DEVLINK_ATTR_PAD))
  7132. goto nla_put_failure;
  7133. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
  7134. u64_stats_read(&stats.rx_bytes),
  7135. DEVLINK_ATTR_PAD))
  7136. goto nla_put_failure;
  7137. nla_nest_end(msg, attr);
  7138. return 0;
  7139. nla_put_failure:
  7140. nla_nest_cancel(msg, attr);
  7141. return -EMSGSIZE;
  7142. }
  7143. static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink,
  7144. const struct devlink_trap_item *trap_item,
  7145. enum devlink_command cmd, u32 portid, u32 seq,
  7146. int flags)
  7147. {
  7148. struct devlink_trap_group_item *group_item = trap_item->group_item;
  7149. void *hdr;
  7150. int err;
  7151. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  7152. if (!hdr)
  7153. return -EMSGSIZE;
  7154. if (devlink_nl_put_handle(msg, devlink))
  7155. goto nla_put_failure;
  7156. if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
  7157. group_item->group->name))
  7158. goto nla_put_failure;
  7159. if (nla_put_string(msg, DEVLINK_ATTR_TRAP_NAME, trap_item->trap->name))
  7160. goto nla_put_failure;
  7161. if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_TYPE, trap_item->trap->type))
  7162. goto nla_put_failure;
  7163. if (trap_item->trap->generic &&
  7164. nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
  7165. goto nla_put_failure;
  7166. if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_ACTION, trap_item->action))
  7167. goto nla_put_failure;
  7168. err = devlink_trap_metadata_put(msg, trap_item->trap);
  7169. if (err)
  7170. goto nla_put_failure;
  7171. err = devlink_trap_stats_put(msg, devlink, trap_item);
  7172. if (err)
  7173. goto nla_put_failure;
  7174. genlmsg_end(msg, hdr);
  7175. return 0;
  7176. nla_put_failure:
  7177. genlmsg_cancel(msg, hdr);
  7178. return -EMSGSIZE;
  7179. }
  7180. static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb,
  7181. struct genl_info *info)
  7182. {
  7183. struct netlink_ext_ack *extack = info->extack;
  7184. struct devlink *devlink = info->user_ptr[0];
  7185. struct devlink_trap_item *trap_item;
  7186. struct sk_buff *msg;
  7187. int err;
  7188. if (list_empty(&devlink->trap_list))
  7189. return -EOPNOTSUPP;
  7190. trap_item = devlink_trap_item_get_from_info(devlink, info);
  7191. if (!trap_item) {
  7192. NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
  7193. return -ENOENT;
  7194. }
  7195. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  7196. if (!msg)
  7197. return -ENOMEM;
  7198. err = devlink_nl_trap_fill(msg, devlink, trap_item,
  7199. DEVLINK_CMD_TRAP_NEW, info->snd_portid,
  7200. info->snd_seq, 0);
  7201. if (err)
  7202. goto err_trap_fill;
  7203. return genlmsg_reply(msg, info);
  7204. err_trap_fill:
  7205. nlmsg_free(msg);
  7206. return err;
  7207. }
  7208. static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg,
  7209. struct netlink_callback *cb)
  7210. {
  7211. struct devlink_trap_item *trap_item;
  7212. struct devlink *devlink;
  7213. int start = cb->args[0];
  7214. unsigned long index;
  7215. int idx = 0;
  7216. int err;
  7217. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  7218. devl_lock(devlink);
  7219. list_for_each_entry(trap_item, &devlink->trap_list, list) {
  7220. if (idx < start) {
  7221. idx++;
  7222. continue;
  7223. }
  7224. err = devlink_nl_trap_fill(msg, devlink, trap_item,
  7225. DEVLINK_CMD_TRAP_NEW,
  7226. NETLINK_CB(cb->skb).portid,
  7227. cb->nlh->nlmsg_seq,
  7228. NLM_F_MULTI);
  7229. if (err) {
  7230. devl_unlock(devlink);
  7231. devlink_put(devlink);
  7232. goto out;
  7233. }
  7234. idx++;
  7235. }
  7236. devl_unlock(devlink);
  7237. devlink_put(devlink);
  7238. }
  7239. out:
  7240. cb->args[0] = idx;
  7241. return msg->len;
  7242. }
  7243. static int __devlink_trap_action_set(struct devlink *devlink,
  7244. struct devlink_trap_item *trap_item,
  7245. enum devlink_trap_action trap_action,
  7246. struct netlink_ext_ack *extack)
  7247. {
  7248. int err;
  7249. if (trap_item->action != trap_action &&
  7250. trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP) {
  7251. NL_SET_ERR_MSG_MOD(extack, "Cannot change action of non-drop traps. Skipping");
  7252. return 0;
  7253. }
  7254. err = devlink->ops->trap_action_set(devlink, trap_item->trap,
  7255. trap_action, extack);
  7256. if (err)
  7257. return err;
  7258. trap_item->action = trap_action;
  7259. return 0;
  7260. }
  7261. static int devlink_trap_action_set(struct devlink *devlink,
  7262. struct devlink_trap_item *trap_item,
  7263. struct genl_info *info)
  7264. {
  7265. enum devlink_trap_action trap_action;
  7266. int err;
  7267. if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
  7268. return 0;
  7269. err = devlink_trap_action_get_from_info(info, &trap_action);
  7270. if (err) {
  7271. NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
  7272. return -EINVAL;
  7273. }
  7274. return __devlink_trap_action_set(devlink, trap_item, trap_action,
  7275. info->extack);
  7276. }
  7277. static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
  7278. struct genl_info *info)
  7279. {
  7280. struct netlink_ext_ack *extack = info->extack;
  7281. struct devlink *devlink = info->user_ptr[0];
  7282. struct devlink_trap_item *trap_item;
  7283. if (list_empty(&devlink->trap_list))
  7284. return -EOPNOTSUPP;
  7285. trap_item = devlink_trap_item_get_from_info(devlink, info);
  7286. if (!trap_item) {
  7287. NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap");
  7288. return -ENOENT;
  7289. }
  7290. return devlink_trap_action_set(devlink, trap_item, info);
  7291. }
  7292. static struct devlink_trap_group_item *
  7293. devlink_trap_group_item_lookup(struct devlink *devlink, const char *name)
  7294. {
  7295. struct devlink_trap_group_item *group_item;
  7296. list_for_each_entry(group_item, &devlink->trap_group_list, list) {
  7297. if (!strcmp(group_item->group->name, name))
  7298. return group_item;
  7299. }
  7300. return NULL;
  7301. }
  7302. static struct devlink_trap_group_item *
  7303. devlink_trap_group_item_lookup_by_id(struct devlink *devlink, u16 id)
  7304. {
  7305. struct devlink_trap_group_item *group_item;
  7306. list_for_each_entry(group_item, &devlink->trap_group_list, list) {
  7307. if (group_item->group->id == id)
  7308. return group_item;
  7309. }
  7310. return NULL;
  7311. }
  7312. static struct devlink_trap_group_item *
  7313. devlink_trap_group_item_get_from_info(struct devlink *devlink,
  7314. struct genl_info *info)
  7315. {
  7316. char *name;
  7317. if (!info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME])
  7318. return NULL;
  7319. name = nla_data(info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME]);
  7320. return devlink_trap_group_item_lookup(devlink, name);
  7321. }
  7322. static int
  7323. devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink,
  7324. const struct devlink_trap_group_item *group_item,
  7325. enum devlink_command cmd, u32 portid, u32 seq,
  7326. int flags)
  7327. {
  7328. void *hdr;
  7329. int err;
  7330. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  7331. if (!hdr)
  7332. return -EMSGSIZE;
  7333. if (devlink_nl_put_handle(msg, devlink))
  7334. goto nla_put_failure;
  7335. if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
  7336. group_item->group->name))
  7337. goto nla_put_failure;
  7338. if (group_item->group->generic &&
  7339. nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
  7340. goto nla_put_failure;
  7341. if (group_item->policer_item &&
  7342. nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
  7343. group_item->policer_item->policer->id))
  7344. goto nla_put_failure;
  7345. err = devlink_trap_group_stats_put(msg, group_item->stats);
  7346. if (err)
  7347. goto nla_put_failure;
  7348. genlmsg_end(msg, hdr);
  7349. return 0;
  7350. nla_put_failure:
  7351. genlmsg_cancel(msg, hdr);
  7352. return -EMSGSIZE;
  7353. }
  7354. static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb,
  7355. struct genl_info *info)
  7356. {
  7357. struct netlink_ext_ack *extack = info->extack;
  7358. struct devlink *devlink = info->user_ptr[0];
  7359. struct devlink_trap_group_item *group_item;
  7360. struct sk_buff *msg;
  7361. int err;
  7362. if (list_empty(&devlink->trap_group_list))
  7363. return -EOPNOTSUPP;
  7364. group_item = devlink_trap_group_item_get_from_info(devlink, info);
  7365. if (!group_item) {
  7366. NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
  7367. return -ENOENT;
  7368. }
  7369. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  7370. if (!msg)
  7371. return -ENOMEM;
  7372. err = devlink_nl_trap_group_fill(msg, devlink, group_item,
  7373. DEVLINK_CMD_TRAP_GROUP_NEW,
  7374. info->snd_portid, info->snd_seq, 0);
  7375. if (err)
  7376. goto err_trap_group_fill;
  7377. return genlmsg_reply(msg, info);
  7378. err_trap_group_fill:
  7379. nlmsg_free(msg);
  7380. return err;
  7381. }
  7382. static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg,
  7383. struct netlink_callback *cb)
  7384. {
  7385. enum devlink_command cmd = DEVLINK_CMD_TRAP_GROUP_NEW;
  7386. struct devlink_trap_group_item *group_item;
  7387. u32 portid = NETLINK_CB(cb->skb).portid;
  7388. struct devlink *devlink;
  7389. int start = cb->args[0];
  7390. unsigned long index;
  7391. int idx = 0;
  7392. int err;
  7393. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  7394. devl_lock(devlink);
  7395. list_for_each_entry(group_item, &devlink->trap_group_list,
  7396. list) {
  7397. if (idx < start) {
  7398. idx++;
  7399. continue;
  7400. }
  7401. err = devlink_nl_trap_group_fill(msg, devlink,
  7402. group_item, cmd,
  7403. portid,
  7404. cb->nlh->nlmsg_seq,
  7405. NLM_F_MULTI);
  7406. if (err) {
  7407. devl_unlock(devlink);
  7408. devlink_put(devlink);
  7409. goto out;
  7410. }
  7411. idx++;
  7412. }
  7413. devl_unlock(devlink);
  7414. devlink_put(devlink);
  7415. }
  7416. out:
  7417. cb->args[0] = idx;
  7418. return msg->len;
  7419. }
  7420. static int
  7421. __devlink_trap_group_action_set(struct devlink *devlink,
  7422. struct devlink_trap_group_item *group_item,
  7423. enum devlink_trap_action trap_action,
  7424. struct netlink_ext_ack *extack)
  7425. {
  7426. const char *group_name = group_item->group->name;
  7427. struct devlink_trap_item *trap_item;
  7428. int err;
  7429. if (devlink->ops->trap_group_action_set) {
  7430. err = devlink->ops->trap_group_action_set(devlink, group_item->group,
  7431. trap_action, extack);
  7432. if (err)
  7433. return err;
  7434. list_for_each_entry(trap_item, &devlink->trap_list, list) {
  7435. if (strcmp(trap_item->group_item->group->name, group_name))
  7436. continue;
  7437. if (trap_item->action != trap_action &&
  7438. trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP)
  7439. continue;
  7440. trap_item->action = trap_action;
  7441. }
  7442. return 0;
  7443. }
  7444. list_for_each_entry(trap_item, &devlink->trap_list, list) {
  7445. if (strcmp(trap_item->group_item->group->name, group_name))
  7446. continue;
  7447. err = __devlink_trap_action_set(devlink, trap_item,
  7448. trap_action, extack);
  7449. if (err)
  7450. return err;
  7451. }
  7452. return 0;
  7453. }
  7454. static int
  7455. devlink_trap_group_action_set(struct devlink *devlink,
  7456. struct devlink_trap_group_item *group_item,
  7457. struct genl_info *info, bool *p_modified)
  7458. {
  7459. enum devlink_trap_action trap_action;
  7460. int err;
  7461. if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
  7462. return 0;
  7463. err = devlink_trap_action_get_from_info(info, &trap_action);
  7464. if (err) {
  7465. NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action");
  7466. return -EINVAL;
  7467. }
  7468. err = __devlink_trap_group_action_set(devlink, group_item, trap_action,
  7469. info->extack);
  7470. if (err)
  7471. return err;
  7472. *p_modified = true;
  7473. return 0;
  7474. }
  7475. static int devlink_trap_group_set(struct devlink *devlink,
  7476. struct devlink_trap_group_item *group_item,
  7477. struct genl_info *info)
  7478. {
  7479. struct devlink_trap_policer_item *policer_item;
  7480. struct netlink_ext_ack *extack = info->extack;
  7481. const struct devlink_trap_policer *policer;
  7482. struct nlattr **attrs = info->attrs;
  7483. int err;
  7484. if (!attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
  7485. return 0;
  7486. if (!devlink->ops->trap_group_set)
  7487. return -EOPNOTSUPP;
  7488. policer_item = group_item->policer_item;
  7489. if (attrs[DEVLINK_ATTR_TRAP_POLICER_ID]) {
  7490. u32 policer_id;
  7491. policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
  7492. policer_item = devlink_trap_policer_item_lookup(devlink,
  7493. policer_id);
  7494. if (policer_id && !policer_item) {
  7495. NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
  7496. return -ENOENT;
  7497. }
  7498. }
  7499. policer = policer_item ? policer_item->policer : NULL;
  7500. err = devlink->ops->trap_group_set(devlink, group_item->group, policer,
  7501. extack);
  7502. if (err)
  7503. return err;
  7504. group_item->policer_item = policer_item;
  7505. return 0;
  7506. }
  7507. static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
  7508. struct genl_info *info)
  7509. {
  7510. struct netlink_ext_ack *extack = info->extack;
  7511. struct devlink *devlink = info->user_ptr[0];
  7512. struct devlink_trap_group_item *group_item;
  7513. bool modified = false;
  7514. int err;
  7515. if (list_empty(&devlink->trap_group_list))
  7516. return -EOPNOTSUPP;
  7517. group_item = devlink_trap_group_item_get_from_info(devlink, info);
  7518. if (!group_item) {
  7519. NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group");
  7520. return -ENOENT;
  7521. }
  7522. err = devlink_trap_group_action_set(devlink, group_item, info,
  7523. &modified);
  7524. if (err)
  7525. return err;
  7526. err = devlink_trap_group_set(devlink, group_item, info);
  7527. if (err)
  7528. goto err_trap_group_set;
  7529. return 0;
  7530. err_trap_group_set:
  7531. if (modified)
  7532. NL_SET_ERR_MSG_MOD(extack, "Trap group set failed, but some changes were committed already");
  7533. return err;
  7534. }
  7535. static struct devlink_trap_policer_item *
  7536. devlink_trap_policer_item_get_from_info(struct devlink *devlink,
  7537. struct genl_info *info)
  7538. {
  7539. u32 id;
  7540. if (!info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
  7541. return NULL;
  7542. id = nla_get_u32(info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
  7543. return devlink_trap_policer_item_lookup(devlink, id);
  7544. }
  7545. static int
  7546. devlink_trap_policer_stats_put(struct sk_buff *msg, struct devlink *devlink,
  7547. const struct devlink_trap_policer *policer)
  7548. {
  7549. struct nlattr *attr;
  7550. u64 drops;
  7551. int err;
  7552. if (!devlink->ops->trap_policer_counter_get)
  7553. return 0;
  7554. err = devlink->ops->trap_policer_counter_get(devlink, policer, &drops);
  7555. if (err)
  7556. return err;
  7557. attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
  7558. if (!attr)
  7559. return -EMSGSIZE;
  7560. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
  7561. DEVLINK_ATTR_PAD))
  7562. goto nla_put_failure;
  7563. nla_nest_end(msg, attr);
  7564. return 0;
  7565. nla_put_failure:
  7566. nla_nest_cancel(msg, attr);
  7567. return -EMSGSIZE;
  7568. }
  7569. static int
  7570. devlink_nl_trap_policer_fill(struct sk_buff *msg, struct devlink *devlink,
  7571. const struct devlink_trap_policer_item *policer_item,
  7572. enum devlink_command cmd, u32 portid, u32 seq,
  7573. int flags)
  7574. {
  7575. void *hdr;
  7576. int err;
  7577. hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
  7578. if (!hdr)
  7579. return -EMSGSIZE;
  7580. if (devlink_nl_put_handle(msg, devlink))
  7581. goto nla_put_failure;
  7582. if (nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
  7583. policer_item->policer->id))
  7584. goto nla_put_failure;
  7585. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
  7586. policer_item->rate, DEVLINK_ATTR_PAD))
  7587. goto nla_put_failure;
  7588. if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
  7589. policer_item->burst, DEVLINK_ATTR_PAD))
  7590. goto nla_put_failure;
  7591. err = devlink_trap_policer_stats_put(msg, devlink,
  7592. policer_item->policer);
  7593. if (err)
  7594. goto nla_put_failure;
  7595. genlmsg_end(msg, hdr);
  7596. return 0;
  7597. nla_put_failure:
  7598. genlmsg_cancel(msg, hdr);
  7599. return -EMSGSIZE;
  7600. }
  7601. static int devlink_nl_cmd_trap_policer_get_doit(struct sk_buff *skb,
  7602. struct genl_info *info)
  7603. {
  7604. struct devlink_trap_policer_item *policer_item;
  7605. struct netlink_ext_ack *extack = info->extack;
  7606. struct devlink *devlink = info->user_ptr[0];
  7607. struct sk_buff *msg;
  7608. int err;
  7609. if (list_empty(&devlink->trap_policer_list))
  7610. return -EOPNOTSUPP;
  7611. policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
  7612. if (!policer_item) {
  7613. NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
  7614. return -ENOENT;
  7615. }
  7616. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  7617. if (!msg)
  7618. return -ENOMEM;
  7619. err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
  7620. DEVLINK_CMD_TRAP_POLICER_NEW,
  7621. info->snd_portid, info->snd_seq, 0);
  7622. if (err)
  7623. goto err_trap_policer_fill;
  7624. return genlmsg_reply(msg, info);
  7625. err_trap_policer_fill:
  7626. nlmsg_free(msg);
  7627. return err;
  7628. }
  7629. static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
  7630. struct netlink_callback *cb)
  7631. {
  7632. enum devlink_command cmd = DEVLINK_CMD_TRAP_POLICER_NEW;
  7633. struct devlink_trap_policer_item *policer_item;
  7634. u32 portid = NETLINK_CB(cb->skb).portid;
  7635. struct devlink *devlink;
  7636. int start = cb->args[0];
  7637. unsigned long index;
  7638. int idx = 0;
  7639. int err;
  7640. devlinks_xa_for_each_registered_get(sock_net(msg->sk), index, devlink) {
  7641. devl_lock(devlink);
  7642. list_for_each_entry(policer_item, &devlink->trap_policer_list,
  7643. list) {
  7644. if (idx < start) {
  7645. idx++;
  7646. continue;
  7647. }
  7648. err = devlink_nl_trap_policer_fill(msg, devlink,
  7649. policer_item, cmd,
  7650. portid,
  7651. cb->nlh->nlmsg_seq,
  7652. NLM_F_MULTI);
  7653. if (err) {
  7654. devl_unlock(devlink);
  7655. devlink_put(devlink);
  7656. goto out;
  7657. }
  7658. idx++;
  7659. }
  7660. devl_unlock(devlink);
  7661. devlink_put(devlink);
  7662. }
  7663. out:
  7664. cb->args[0] = idx;
  7665. return msg->len;
  7666. }
  7667. static int
  7668. devlink_trap_policer_set(struct devlink *devlink,
  7669. struct devlink_trap_policer_item *policer_item,
  7670. struct genl_info *info)
  7671. {
  7672. struct netlink_ext_ack *extack = info->extack;
  7673. struct nlattr **attrs = info->attrs;
  7674. u64 rate, burst;
  7675. int err;
  7676. rate = policer_item->rate;
  7677. burst = policer_item->burst;
  7678. if (attrs[DEVLINK_ATTR_TRAP_POLICER_RATE])
  7679. rate = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_RATE]);
  7680. if (attrs[DEVLINK_ATTR_TRAP_POLICER_BURST])
  7681. burst = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_BURST]);
  7682. if (rate < policer_item->policer->min_rate) {
  7683. NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit");
  7684. return -EINVAL;
  7685. }
  7686. if (rate > policer_item->policer->max_rate) {
  7687. NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit");
  7688. return -EINVAL;
  7689. }
  7690. if (burst < policer_item->policer->min_burst) {
  7691. NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit");
  7692. return -EINVAL;
  7693. }
  7694. if (burst > policer_item->policer->max_burst) {
  7695. NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit");
  7696. return -EINVAL;
  7697. }
  7698. err = devlink->ops->trap_policer_set(devlink, policer_item->policer,
  7699. rate, burst, info->extack);
  7700. if (err)
  7701. return err;
  7702. policer_item->rate = rate;
  7703. policer_item->burst = burst;
  7704. return 0;
  7705. }
  7706. static int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
  7707. struct genl_info *info)
  7708. {
  7709. struct devlink_trap_policer_item *policer_item;
  7710. struct netlink_ext_ack *extack = info->extack;
  7711. struct devlink *devlink = info->user_ptr[0];
  7712. if (list_empty(&devlink->trap_policer_list))
  7713. return -EOPNOTSUPP;
  7714. if (!devlink->ops->trap_policer_set)
  7715. return -EOPNOTSUPP;
  7716. policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
  7717. if (!policer_item) {
  7718. NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
  7719. return -ENOENT;
  7720. }
  7721. return devlink_trap_policer_set(devlink, policer_item, info);
  7722. }
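/* From userspace this corresponds to the iproute2 devlink tool, roughly
 * (device handle and policer id are examples):
 *
 *	devlink trap policer set pci/0000:01:00.0 policer 1 rate 1000 burst 128
 *	devlink trap policer show pci/0000:01:00.0 policer 1
 *
 * rate and burst are validated against the min/max limits the driver
 * registered, as done above, before the driver op is invoked.
 */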
  7723. static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
  7724. [DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
  7725. DEVLINK_ATTR_TRAP_POLICER_ID },
  7726. [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
  7727. [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
  7728. [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
  7729. [DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
  7730. DEVLINK_PORT_TYPE_IB),
  7731. [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
  7732. [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
  7733. [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
  7734. [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
  7735. [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
  7736. [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
  7737. [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
  7738. [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
  7739. [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
  7740. DEVLINK_ESWITCH_MODE_SWITCHDEV),
  7741. [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
  7742. [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
  7743. [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
  7744. [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
  7745. [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
  7746. [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
  7747. [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
  7748. [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
  7749. [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
  7750. [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
  7751. [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
  7752. [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
  7753. [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
  7754. [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
  7755. [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
  7756. [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
  7757. [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
  7758. [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
  7759. [DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
  7760. NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
  7761. [DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
  7762. [DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
  7763. [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
  7764. [DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
  7765. [DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
  7766. [DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
  7767. [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
  7768. [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
  7769. [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
  7770. [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
  7771. [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
  7772. [DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
  7773. DEVLINK_RELOAD_ACTION_MAX),
  7774. [DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
  7775. [DEVLINK_ATTR_PORT_FLAVOUR] = { .type = NLA_U16 },
  7776. [DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
  7777. [DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
  7778. [DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
  7779. [DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
  7780. [DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
  7781. [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
  7782. [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
  7783. [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
  7784. [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
  7785. [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
  7786. [DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
  7787. };
  7788. static const struct genl_small_ops devlink_nl_ops[] = {
  7789. {
  7790. .cmd = DEVLINK_CMD_GET,
  7791. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7792. .doit = devlink_nl_cmd_get_doit,
  7793. .dumpit = devlink_nl_cmd_get_dumpit,
  7794. /* can be retrieved by unprivileged users */
  7795. },
  7796. {
  7797. .cmd = DEVLINK_CMD_PORT_GET,
  7798. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7799. .doit = devlink_nl_cmd_port_get_doit,
  7800. .dumpit = devlink_nl_cmd_port_get_dumpit,
  7801. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7802. /* can be retrieved by unprivileged users */
  7803. },
  7804. {
  7805. .cmd = DEVLINK_CMD_PORT_SET,
  7806. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7807. .doit = devlink_nl_cmd_port_set_doit,
  7808. .flags = GENL_ADMIN_PERM,
  7809. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7810. },
  7811. {
  7812. .cmd = DEVLINK_CMD_RATE_GET,
  7813. .doit = devlink_nl_cmd_rate_get_doit,
  7814. .dumpit = devlink_nl_cmd_rate_get_dumpit,
  7815. .internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
  7816. /* can be retrieved by unprivileged users */
  7817. },
  7818. {
  7819. .cmd = DEVLINK_CMD_RATE_SET,
  7820. .doit = devlink_nl_cmd_rate_set_doit,
  7821. .flags = GENL_ADMIN_PERM,
  7822. .internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
  7823. },
  7824. {
  7825. .cmd = DEVLINK_CMD_RATE_NEW,
  7826. .doit = devlink_nl_cmd_rate_new_doit,
  7827. .flags = GENL_ADMIN_PERM,
  7828. },
  7829. {
  7830. .cmd = DEVLINK_CMD_RATE_DEL,
  7831. .doit = devlink_nl_cmd_rate_del_doit,
  7832. .flags = GENL_ADMIN_PERM,
  7833. .internal_flags = DEVLINK_NL_FLAG_NEED_RATE_NODE,
  7834. },
  7835. {
  7836. .cmd = DEVLINK_CMD_PORT_SPLIT,
  7837. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7838. .doit = devlink_nl_cmd_port_split_doit,
  7839. .flags = GENL_ADMIN_PERM,
  7840. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7841. },
  7842. {
  7843. .cmd = DEVLINK_CMD_PORT_UNSPLIT,
  7844. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7845. .doit = devlink_nl_cmd_port_unsplit_doit,
  7846. .flags = GENL_ADMIN_PERM,
  7847. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7848. },
  7849. {
  7850. .cmd = DEVLINK_CMD_PORT_NEW,
  7851. .doit = devlink_nl_cmd_port_new_doit,
  7852. .flags = GENL_ADMIN_PERM,
  7853. },
  7854. {
  7855. .cmd = DEVLINK_CMD_PORT_DEL,
  7856. .doit = devlink_nl_cmd_port_del_doit,
  7857. .flags = GENL_ADMIN_PERM,
  7858. },
  7859. {
  7860. .cmd = DEVLINK_CMD_LINECARD_GET,
  7861. .doit = devlink_nl_cmd_linecard_get_doit,
  7862. .dumpit = devlink_nl_cmd_linecard_get_dumpit,
  7863. .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
  7864. /* can be retrieved by unprivileged users */
  7865. },
  7866. {
  7867. .cmd = DEVLINK_CMD_LINECARD_SET,
  7868. .doit = devlink_nl_cmd_linecard_set_doit,
  7869. .flags = GENL_ADMIN_PERM,
  7870. .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
  7871. },
  7872. {
  7873. .cmd = DEVLINK_CMD_SB_GET,
  7874. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7875. .doit = devlink_nl_cmd_sb_get_doit,
  7876. .dumpit = devlink_nl_cmd_sb_get_dumpit,
  7877. /* can be retrieved by unprivileged users */
  7878. },
  7879. {
  7880. .cmd = DEVLINK_CMD_SB_POOL_GET,
  7881. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7882. .doit = devlink_nl_cmd_sb_pool_get_doit,
  7883. .dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
  7884. /* can be retrieved by unprivileged users */
  7885. },
  7886. {
  7887. .cmd = DEVLINK_CMD_SB_POOL_SET,
  7888. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7889. .doit = devlink_nl_cmd_sb_pool_set_doit,
  7890. .flags = GENL_ADMIN_PERM,
  7891. },
  7892. {
  7893. .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
  7894. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7895. .doit = devlink_nl_cmd_sb_port_pool_get_doit,
  7896. .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
  7897. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7898. /* can be retrieved by unprivileged users */
  7899. },
  7900. {
  7901. .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
  7902. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7903. .doit = devlink_nl_cmd_sb_port_pool_set_doit,
  7904. .flags = GENL_ADMIN_PERM,
  7905. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7906. },
  7907. {
  7908. .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
  7909. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7910. .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
  7911. .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
  7912. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7913. /* can be retrieved by unprivileged users */
  7914. },
  7915. {
  7916. .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
  7917. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7918. .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
  7919. .flags = GENL_ADMIN_PERM,
  7920. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  7921. },
  7922. {
  7923. .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
  7924. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7925. .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
  7926. .flags = GENL_ADMIN_PERM,
  7927. },
  7928. {
  7929. .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
  7930. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7931. .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
  7932. .flags = GENL_ADMIN_PERM,
  7933. },
  7934. {
  7935. .cmd = DEVLINK_CMD_ESWITCH_GET,
  7936. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7937. .doit = devlink_nl_cmd_eswitch_get_doit,
  7938. .flags = GENL_ADMIN_PERM,
  7939. },
  7940. {
  7941. .cmd = DEVLINK_CMD_ESWITCH_SET,
  7942. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7943. .doit = devlink_nl_cmd_eswitch_set_doit,
  7944. .flags = GENL_ADMIN_PERM,
  7945. },
  7946. {
  7947. .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
  7948. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7949. .doit = devlink_nl_cmd_dpipe_table_get,
  7950. /* can be retrieved by unprivileged users */
  7951. },
  7952. {
  7953. .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
  7954. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7955. .doit = devlink_nl_cmd_dpipe_entries_get,
  7956. /* can be retrieved by unprivileged users */
  7957. },
  7958. {
  7959. .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
  7960. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7961. .doit = devlink_nl_cmd_dpipe_headers_get,
  7962. /* can be retrieved by unprivileged users */
  7963. },
  7964. {
  7965. .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
  7966. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7967. .doit = devlink_nl_cmd_dpipe_table_counters_set,
  7968. .flags = GENL_ADMIN_PERM,
  7969. },
  7970. {
  7971. .cmd = DEVLINK_CMD_RESOURCE_SET,
  7972. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7973. .doit = devlink_nl_cmd_resource_set,
  7974. .flags = GENL_ADMIN_PERM,
  7975. },
  7976. {
  7977. .cmd = DEVLINK_CMD_RESOURCE_DUMP,
  7978. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7979. .doit = devlink_nl_cmd_resource_dump,
  7980. /* can be retrieved by unprivileged users */
  7981. },
  7982. {
  7983. .cmd = DEVLINK_CMD_RELOAD,
  7984. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7985. .doit = devlink_nl_cmd_reload,
  7986. .flags = GENL_ADMIN_PERM,
  7987. },
  7988. {
  7989. .cmd = DEVLINK_CMD_PARAM_GET,
  7990. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7991. .doit = devlink_nl_cmd_param_get_doit,
  7992. .dumpit = devlink_nl_cmd_param_get_dumpit,
  7993. /* can be retrieved by unprivileged users */
  7994. },
  7995. {
  7996. .cmd = DEVLINK_CMD_PARAM_SET,
  7997. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  7998. .doit = devlink_nl_cmd_param_set_doit,
  7999. .flags = GENL_ADMIN_PERM,
  8000. },
  8001. {
  8002. .cmd = DEVLINK_CMD_PORT_PARAM_GET,
  8003. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8004. .doit = devlink_nl_cmd_port_param_get_doit,
  8005. .dumpit = devlink_nl_cmd_port_param_get_dumpit,
  8006. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  8007. /* can be retrieved by unprivileged users */
  8008. },
  8009. {
  8010. .cmd = DEVLINK_CMD_PORT_PARAM_SET,
  8011. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8012. .doit = devlink_nl_cmd_port_param_set_doit,
  8013. .flags = GENL_ADMIN_PERM,
  8014. .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
  8015. },
  8016. {
  8017. .cmd = DEVLINK_CMD_REGION_GET,
  8018. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8019. .doit = devlink_nl_cmd_region_get_doit,
  8020. .dumpit = devlink_nl_cmd_region_get_dumpit,
  8021. .flags = GENL_ADMIN_PERM,
  8022. },
  8023. {
  8024. .cmd = DEVLINK_CMD_REGION_NEW,
  8025. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8026. .doit = devlink_nl_cmd_region_new,
  8027. .flags = GENL_ADMIN_PERM,
  8028. },
  8029. {
  8030. .cmd = DEVLINK_CMD_REGION_DEL,
  8031. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8032. .doit = devlink_nl_cmd_region_del,
  8033. .flags = GENL_ADMIN_PERM,
  8034. },
  8035. {
  8036. .cmd = DEVLINK_CMD_REGION_READ,
  8037. .validate = GENL_DONT_VALIDATE_STRICT |
  8038. GENL_DONT_VALIDATE_DUMP_STRICT,
  8039. .dumpit = devlink_nl_cmd_region_read_dumpit,
  8040. .flags = GENL_ADMIN_PERM,
  8041. },
  8042. {
  8043. .cmd = DEVLINK_CMD_INFO_GET,
  8044. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8045. .doit = devlink_nl_cmd_info_get_doit,
  8046. .dumpit = devlink_nl_cmd_info_get_dumpit,
  8047. /* can be retrieved by unprivileged users */
  8048. },
  8049. {
  8050. .cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
  8051. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8052. .doit = devlink_nl_cmd_health_reporter_get_doit,
  8053. .dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
  8054. .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
  8055. /* can be retrieved by unprivileged users */
  8056. },
  8057. {
  8058. .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
  8059. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8060. .doit = devlink_nl_cmd_health_reporter_set_doit,
  8061. .flags = GENL_ADMIN_PERM,
  8062. .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
  8063. },
  8064. {
  8065. .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
  8066. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8067. .doit = devlink_nl_cmd_health_reporter_recover_doit,
  8068. .flags = GENL_ADMIN_PERM,
  8069. .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
  8070. },
  8071. {
  8072. .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
  8073. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8074. .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
  8075. .flags = GENL_ADMIN_PERM,
  8076. .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
  8077. },
  8078. {
  8079. .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
  8080. .validate = GENL_DONT_VALIDATE_STRICT |
  8081. GENL_DONT_VALIDATE_DUMP_STRICT,
  8082. .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
  8083. .flags = GENL_ADMIN_PERM,
  8084. },
  8085. {
  8086. .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
  8087. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8088. .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
  8089. .flags = GENL_ADMIN_PERM,
  8090. .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
  8091. },
  8092. {
  8093. .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
  8094. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8095. .doit = devlink_nl_cmd_health_reporter_test_doit,
  8096. .flags = GENL_ADMIN_PERM,
  8097. .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
  8098. },
  8099. {
  8100. .cmd = DEVLINK_CMD_FLASH_UPDATE,
  8101. .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
  8102. .doit = devlink_nl_cmd_flash_update,
  8103. .flags = GENL_ADMIN_PERM,
  8104. },
  8105. {
  8106. .cmd = DEVLINK_CMD_TRAP_GET,
  8107. .doit = devlink_nl_cmd_trap_get_doit,
  8108. .dumpit = devlink_nl_cmd_trap_get_dumpit,
  8109. /* can be retrieved by unprivileged users */
  8110. },
  8111. {
  8112. .cmd = DEVLINK_CMD_TRAP_SET,
  8113. .doit = devlink_nl_cmd_trap_set_doit,
  8114. .flags = GENL_ADMIN_PERM,
  8115. },
  8116. {
  8117. .cmd = DEVLINK_CMD_TRAP_GROUP_GET,
  8118. .doit = devlink_nl_cmd_trap_group_get_doit,
  8119. .dumpit = devlink_nl_cmd_trap_group_get_dumpit,
  8120. /* can be retrieved by unprivileged users */
  8121. },
  8122. {
  8123. .cmd = DEVLINK_CMD_TRAP_GROUP_SET,
  8124. .doit = devlink_nl_cmd_trap_group_set_doit,
  8125. .flags = GENL_ADMIN_PERM,
  8126. },
  8127. {
  8128. .cmd = DEVLINK_CMD_TRAP_POLICER_GET,
  8129. .doit = devlink_nl_cmd_trap_policer_get_doit,
  8130. .dumpit = devlink_nl_cmd_trap_policer_get_dumpit,
  8131. /* can be retrieved by unprivileged users */
  8132. },
  8133. {
  8134. .cmd = DEVLINK_CMD_TRAP_POLICER_SET,
  8135. .doit = devlink_nl_cmd_trap_policer_set_doit,
  8136. .flags = GENL_ADMIN_PERM,
  8137. },
  8138. {
  8139. .cmd = DEVLINK_CMD_SELFTESTS_GET,
  8140. .doit = devlink_nl_cmd_selftests_get_doit,
8141. .dumpit = devlink_nl_cmd_selftests_get_dumpit,
  8142. /* can be retrieved by unprivileged users */
  8143. },
  8144. {
  8145. .cmd = DEVLINK_CMD_SELFTESTS_RUN,
  8146. .doit = devlink_nl_cmd_selftests_run,
  8147. .flags = GENL_ADMIN_PERM,
  8148. },
  8149. };
  8150. static struct genl_family devlink_nl_family __ro_after_init = {
  8151. .name = DEVLINK_GENL_NAME,
  8152. .version = DEVLINK_GENL_VERSION,
  8153. .maxattr = DEVLINK_ATTR_MAX,
  8154. .policy = devlink_nl_policy,
  8155. .netnsok = true,
  8156. .parallel_ops = true,
  8157. .pre_doit = devlink_nl_pre_doit,
  8158. .post_doit = devlink_nl_post_doit,
  8159. .module = THIS_MODULE,
  8160. .small_ops = devlink_nl_ops,
  8161. .n_small_ops = ARRAY_SIZE(devlink_nl_ops),
  8162. .resv_start_op = DEVLINK_CMD_SELFTESTS_RUN + 1,
  8163. .mcgrps = devlink_nl_mcgrps,
  8164. .n_mcgrps = ARRAY_SIZE(devlink_nl_mcgrps),
  8165. };
  8166. static bool devlink_reload_actions_valid(const struct devlink_ops *ops)
  8167. {
  8168. const struct devlink_reload_combination *comb;
  8169. int i;
  8170. if (!devlink_reload_supported(ops)) {
  8171. if (WARN_ON(ops->reload_actions))
  8172. return false;
  8173. return true;
  8174. }
  8175. if (WARN_ON(!ops->reload_actions ||
  8176. ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
  8177. ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX)))
  8178. return false;
  8179. if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) ||
  8180. ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX)))
  8181. return false;
  8182. for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) {
  8183. comb = &devlink_reload_invalid_combinations[i];
  8184. if (ops->reload_actions == BIT(comb->action) &&
  8185. ops->reload_limits == BIT(comb->limit))
  8186. return false;
  8187. }
  8188. return true;
  8189. }
  8190. /**
  8191. * devlink_set_features - Set devlink supported features
  8192. *
  8193. * @devlink: devlink
8194. * @features: devlink supported features
8195. *
8196. * This interface allows us to set reload ops separately from
8197. * devlink_alloc().
  8198. */
  8199. void devlink_set_features(struct devlink *devlink, u64 features)
  8200. {
  8201. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  8202. WARN_ON(features & DEVLINK_F_RELOAD &&
  8203. !devlink_reload_supported(devlink->ops));
  8204. devlink->features = features;
  8205. }
  8206. EXPORT_SYMBOL_GPL(devlink_set_features);
  8207. /**
  8208. * devlink_alloc_ns - Allocate new devlink instance resources
  8209. * in specific namespace
  8210. *
  8211. * @ops: ops
  8212. * @priv_size: size of user private data
  8213. * @net: net namespace
  8214. * @dev: parent device
  8215. *
  8216. * Allocate new devlink instance resources, including devlink index
  8217. * and name.
  8218. */
  8219. struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
  8220. size_t priv_size, struct net *net,
  8221. struct device *dev)
  8222. {
  8223. struct devlink *devlink;
  8224. static u32 last_id;
  8225. int ret;
  8226. WARN_ON(!ops || !dev);
  8227. if (!devlink_reload_actions_valid(ops))
  8228. return NULL;
  8229. devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
  8230. if (!devlink)
  8231. return NULL;
  8232. ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
  8233. &last_id, GFP_KERNEL);
  8234. if (ret < 0) {
  8235. kfree(devlink);
  8236. return NULL;
  8237. }
  8238. devlink->dev = dev;
  8239. devlink->ops = ops;
  8240. xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
  8241. write_pnet(&devlink->_net, net);
  8242. INIT_LIST_HEAD(&devlink->port_list);
  8243. INIT_LIST_HEAD(&devlink->rate_list);
  8244. INIT_LIST_HEAD(&devlink->linecard_list);
  8245. INIT_LIST_HEAD(&devlink->sb_list);
  8246. INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
  8247. INIT_LIST_HEAD(&devlink->resource_list);
  8248. INIT_LIST_HEAD(&devlink->param_list);
  8249. INIT_LIST_HEAD(&devlink->region_list);
  8250. INIT_LIST_HEAD(&devlink->reporter_list);
  8251. INIT_LIST_HEAD(&devlink->trap_list);
  8252. INIT_LIST_HEAD(&devlink->trap_group_list);
  8253. INIT_LIST_HEAD(&devlink->trap_policer_list);
  8254. lockdep_register_key(&devlink->lock_key);
  8255. mutex_init(&devlink->lock);
  8256. lockdep_set_class(&devlink->lock, &devlink->lock_key);
  8257. mutex_init(&devlink->reporters_lock);
  8258. mutex_init(&devlink->linecards_lock);
  8259. refcount_set(&devlink->refcount, 1);
  8260. init_completion(&devlink->comp);
  8261. return devlink;
  8262. }
  8263. EXPORT_SYMBOL_GPL(devlink_alloc_ns);
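/*
 * Illustrative sketch (not part of the devlink core): a minimal probe/remove
 * pairing for the allocation and registration API above. The "foo_*" names
 * and the foo_priv layout are hypothetical placeholders; the sketch assumes
 * <net/devlink.h> and <net/net_namespace.h> are already included, as they
 * are in this file.
 */
struct foo_priv {
	u32 dummy;
};

static const struct devlink_ops foo_devlink_ops = {};

static struct devlink *foo_probe(struct device *dev)
{
	struct devlink *devlink;

	/* Allocate the instance in init_net with room for driver priv data. */
	devlink = devlink_alloc_ns(&foo_devlink_ops, sizeof(struct foo_priv),
				   &init_net, dev);
	if (!devlink)
		return NULL;

	/* If foo_devlink_ops implemented the reload callbacks, the feature
	 * would be advertised here, before registration:
	 * devlink_set_features(devlink, DEVLINK_F_RELOAD);
	 */

	/* Publish the instance; notifications are emitted from this point. */
	devlink_register(devlink);
	return devlink;
}

static void foo_remove(struct devlink *devlink)
{
	devlink_unregister(devlink);
	devlink_free(devlink);
}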
  8264. static void
  8265. devlink_trap_policer_notify(struct devlink *devlink,
  8266. const struct devlink_trap_policer_item *policer_item,
  8267. enum devlink_command cmd);
  8268. static void
  8269. devlink_trap_group_notify(struct devlink *devlink,
  8270. const struct devlink_trap_group_item *group_item,
  8271. enum devlink_command cmd);
  8272. static void devlink_trap_notify(struct devlink *devlink,
  8273. const struct devlink_trap_item *trap_item,
  8274. enum devlink_command cmd);
  8275. static void devlink_notify_register(struct devlink *devlink)
  8276. {
  8277. struct devlink_trap_policer_item *policer_item;
  8278. struct devlink_trap_group_item *group_item;
  8279. struct devlink_param_item *param_item;
  8280. struct devlink_trap_item *trap_item;
  8281. struct devlink_port *devlink_port;
  8282. struct devlink_linecard *linecard;
  8283. struct devlink_rate *rate_node;
  8284. struct devlink_region *region;
  8285. devlink_notify(devlink, DEVLINK_CMD_NEW);
  8286. list_for_each_entry(linecard, &devlink->linecard_list, list)
  8287. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  8288. list_for_each_entry(devlink_port, &devlink->port_list, list)
  8289. devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
  8290. list_for_each_entry(policer_item, &devlink->trap_policer_list, list)
  8291. devlink_trap_policer_notify(devlink, policer_item,
  8292. DEVLINK_CMD_TRAP_POLICER_NEW);
  8293. list_for_each_entry(group_item, &devlink->trap_group_list, list)
  8294. devlink_trap_group_notify(devlink, group_item,
  8295. DEVLINK_CMD_TRAP_GROUP_NEW);
  8296. list_for_each_entry(trap_item, &devlink->trap_list, list)
  8297. devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
  8298. list_for_each_entry(rate_node, &devlink->rate_list, list)
  8299. devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
  8300. list_for_each_entry(region, &devlink->region_list, list)
  8301. devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
  8302. list_for_each_entry(param_item, &devlink->param_list, list)
  8303. devlink_param_notify(devlink, 0, param_item,
  8304. DEVLINK_CMD_PARAM_NEW);
  8305. }
  8306. static void devlink_notify_unregister(struct devlink *devlink)
  8307. {
  8308. struct devlink_trap_policer_item *policer_item;
  8309. struct devlink_trap_group_item *group_item;
  8310. struct devlink_param_item *param_item;
  8311. struct devlink_trap_item *trap_item;
  8312. struct devlink_port *devlink_port;
  8313. struct devlink_linecard *linecard;
  8314. struct devlink_rate *rate_node;
  8315. struct devlink_region *region;
  8316. list_for_each_entry_reverse(param_item, &devlink->param_list, list)
  8317. devlink_param_notify(devlink, 0, param_item,
  8318. DEVLINK_CMD_PARAM_DEL);
  8319. list_for_each_entry_reverse(region, &devlink->region_list, list)
  8320. devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
  8321. list_for_each_entry_reverse(rate_node, &devlink->rate_list, list)
  8322. devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
  8323. list_for_each_entry_reverse(trap_item, &devlink->trap_list, list)
  8324. devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
  8325. list_for_each_entry_reverse(group_item, &devlink->trap_group_list, list)
  8326. devlink_trap_group_notify(devlink, group_item,
  8327. DEVLINK_CMD_TRAP_GROUP_DEL);
  8328. list_for_each_entry_reverse(policer_item, &devlink->trap_policer_list,
  8329. list)
  8330. devlink_trap_policer_notify(devlink, policer_item,
  8331. DEVLINK_CMD_TRAP_POLICER_DEL);
  8332. list_for_each_entry_reverse(devlink_port, &devlink->port_list, list)
  8333. devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
  8334. list_for_each_entry_reverse(linecard, &devlink->linecard_list, list)
  8335. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
  8336. devlink_notify(devlink, DEVLINK_CMD_DEL);
  8337. }
  8338. /**
  8339. * devlink_register - Register devlink instance
  8340. *
  8341. * @devlink: devlink
  8342. */
  8343. void devlink_register(struct devlink *devlink)
  8344. {
  8345. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  8346. /* Make sure that we are in .probe() routine */
  8347. xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
  8348. devlink_notify_register(devlink);
  8349. }
  8350. EXPORT_SYMBOL_GPL(devlink_register);
  8351. /**
  8352. * devlink_unregister - Unregister devlink instance
  8353. *
  8354. * @devlink: devlink
  8355. */
  8356. void devlink_unregister(struct devlink *devlink)
  8357. {
  8358. ASSERT_DEVLINK_REGISTERED(devlink);
  8359. /* Make sure that we are in .remove() routine */
  8360. xa_set_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
  8361. devlink_put(devlink);
  8362. wait_for_completion(&devlink->comp);
  8363. devlink_notify_unregister(devlink);
  8364. xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
  8365. xa_clear_mark(&devlinks, devlink->index, DEVLINK_UNREGISTERING);
  8366. }
  8367. EXPORT_SYMBOL_GPL(devlink_unregister);
  8368. /**
  8369. * devlink_free - Free devlink instance resources
  8370. *
  8371. * @devlink: devlink
  8372. */
  8373. void devlink_free(struct devlink *devlink)
  8374. {
  8375. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  8376. mutex_destroy(&devlink->linecards_lock);
  8377. mutex_destroy(&devlink->reporters_lock);
  8378. mutex_destroy(&devlink->lock);
  8379. lockdep_unregister_key(&devlink->lock_key);
  8380. WARN_ON(!list_empty(&devlink->trap_policer_list));
  8381. WARN_ON(!list_empty(&devlink->trap_group_list));
  8382. WARN_ON(!list_empty(&devlink->trap_list));
  8383. WARN_ON(!list_empty(&devlink->reporter_list));
  8384. WARN_ON(!list_empty(&devlink->region_list));
  8385. WARN_ON(!list_empty(&devlink->param_list));
  8386. WARN_ON(!list_empty(&devlink->resource_list));
  8387. WARN_ON(!list_empty(&devlink->dpipe_table_list));
  8388. WARN_ON(!list_empty(&devlink->sb_list));
  8389. WARN_ON(!list_empty(&devlink->rate_list));
  8390. WARN_ON(!list_empty(&devlink->linecard_list));
  8391. WARN_ON(!list_empty(&devlink->port_list));
  8392. xa_destroy(&devlink->snapshot_ids);
  8393. xa_erase(&devlinks, devlink->index);
  8394. kfree(devlink);
  8395. }
  8396. EXPORT_SYMBOL_GPL(devlink_free);
  8397. static void devlink_port_type_warn(struct work_struct *work)
  8398. {
  8399. struct devlink_port *port = container_of(to_delayed_work(work),
  8400. struct devlink_port,
  8401. type_warn_dw);
  8402. dev_warn(port->devlink->dev, "Type was not set for devlink port.");
  8403. }
  8404. static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
  8405. {
  8406. /* Ignore CPU and DSA flavours. */
  8407. return devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU &&
  8408. devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA &&
  8409. devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_UNUSED;
  8410. }
  8411. #define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
  8412. static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
  8413. {
  8414. if (!devlink_port_type_should_warn(devlink_port))
  8415. return;
8416. /* Schedule a work item to warn in case the driver does not set
8417. * the port type within the timeout.
8418. */
  8419. schedule_delayed_work(&devlink_port->type_warn_dw,
  8420. DEVLINK_PORT_TYPE_WARN_TIMEOUT);
  8421. }
  8422. static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
  8423. {
  8424. if (!devlink_port_type_should_warn(devlink_port))
  8425. return;
  8426. cancel_delayed_work_sync(&devlink_port->type_warn_dw);
  8427. }
  8428. /**
  8429. * devlink_port_init() - Init devlink port
  8430. *
  8431. * @devlink: devlink
  8432. * @devlink_port: devlink port
  8433. *
8434. * Initialize the essential fields that are needed by functions
8435. * that may be called before devlink port registration.
8436. * Calling this function is optional and not needed
8437. * in case the driver does not use such functions.
  8438. */
  8439. void devlink_port_init(struct devlink *devlink,
  8440. struct devlink_port *devlink_port)
  8441. {
  8442. if (devlink_port->initialized)
  8443. return;
  8444. devlink_port->devlink = devlink;
  8445. INIT_LIST_HEAD(&devlink_port->region_list);
  8446. devlink_port->initialized = true;
  8447. }
  8448. EXPORT_SYMBOL_GPL(devlink_port_init);
  8449. /**
  8450. * devlink_port_fini() - Deinitialize devlink port
  8451. *
  8452. * @devlink_port: devlink port
  8453. *
8454. * Deinitialize the essential fields that are in use by functions
8455. * that may be called after devlink port unregistration.
8456. * Calling this function is optional and not needed
8457. * in case the driver does not use such functions.
  8458. */
  8459. void devlink_port_fini(struct devlink_port *devlink_port)
  8460. {
  8461. WARN_ON(!list_empty(&devlink_port->region_list));
  8462. }
  8463. EXPORT_SYMBOL_GPL(devlink_port_fini);
  8464. /**
  8465. * devl_port_register() - Register devlink port
  8466. *
  8467. * @devlink: devlink
  8468. * @devlink_port: devlink port
  8469. * @port_index: driver-specific numerical identifier of the port
  8470. *
8471. * Register devlink port with the provided port index. The caller can
8472. * use any indexing scheme, even a hardware-related one. The devlink_port
8473. * structure is convenient to embed inside the driver's private structure.
8474. * Note that the caller is responsible for zeroing the devlink_port
8475. * structure.
  8476. */
  8477. int devl_port_register(struct devlink *devlink,
  8478. struct devlink_port *devlink_port,
  8479. unsigned int port_index)
  8480. {
  8481. devl_assert_locked(devlink);
  8482. if (devlink_port_index_exists(devlink, port_index))
  8483. return -EEXIST;
  8484. ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
  8485. devlink_port_init(devlink, devlink_port);
  8486. devlink_port->registered = true;
  8487. devlink_port->index = port_index;
  8488. spin_lock_init(&devlink_port->type_lock);
  8489. INIT_LIST_HEAD(&devlink_port->reporter_list);
  8490. mutex_init(&devlink_port->reporters_lock);
  8491. list_add_tail(&devlink_port->list, &devlink->port_list);
  8492. INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
  8493. devlink_port_type_warn_schedule(devlink_port);
  8494. devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
  8495. return 0;
  8496. }
  8497. EXPORT_SYMBOL_GPL(devl_port_register);
  8498. /**
  8499. * devlink_port_register - Register devlink port
  8500. *
  8501. * @devlink: devlink
  8502. * @devlink_port: devlink port
  8503. * @port_index: driver-specific numerical identifier of the port
  8504. *
8505. * Register devlink port with the provided port index. The caller can
8506. * use any indexing scheme, even a hardware-related one. The devlink_port
8507. * structure is convenient to embed inside the driver's private structure.
8508. * Note that the caller is responsible for zeroing the devlink_port
8509. * structure.
8510. *
8511. * Context: Takes and releases devlink->lock <mutex>.
  8512. */
  8513. int devlink_port_register(struct devlink *devlink,
  8514. struct devlink_port *devlink_port,
  8515. unsigned int port_index)
  8516. {
  8517. int err;
  8518. devl_lock(devlink);
  8519. err = devl_port_register(devlink, devlink_port, port_index);
  8520. devl_unlock(devlink);
  8521. return err;
  8522. }
  8523. EXPORT_SYMBOL_GPL(devlink_port_register);
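/*
 * Illustrative sketch (not part of the devlink core): registering a single
 * physical port embedded in a hypothetical driver-private structure. The
 * priv area returned by devlink_priv() is zeroed by devlink_alloc_ns(), which
 * satisfies the zeroing requirement noted above. "foo_*" names are
 * placeholders.
 */
struct foo_port_priv {
	struct devlink_port dl_port;
};

static int foo_port_add(struct devlink *devlink, unsigned int port_index)
{
	struct foo_port_priv *fp = devlink_priv(devlink);
	struct devlink_port_attrs attrs = {};

	/* Attributes must be set before the port is registered. */
	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = port_index;
	devlink_port_attrs_set(&fp->dl_port, &attrs);

	/* Takes and releases devlink->lock internally. */
	return devlink_port_register(devlink, &fp->dl_port, port_index);
}

static void foo_port_del(struct foo_port_priv *fp)
{
	devlink_port_unregister(&fp->dl_port);
}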
  8524. /**
  8525. * devl_port_unregister() - Unregister devlink port
  8526. *
  8527. * @devlink_port: devlink port
  8528. */
  8529. void devl_port_unregister(struct devlink_port *devlink_port)
  8530. {
  8531. lockdep_assert_held(&devlink_port->devlink->lock);
  8532. devlink_port_type_warn_cancel(devlink_port);
  8533. devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
  8534. list_del(&devlink_port->list);
  8535. WARN_ON(!list_empty(&devlink_port->reporter_list));
  8536. mutex_destroy(&devlink_port->reporters_lock);
  8537. devlink_port->registered = false;
  8538. }
  8539. EXPORT_SYMBOL_GPL(devl_port_unregister);
  8540. /**
  8541. * devlink_port_unregister - Unregister devlink port
  8542. *
  8543. * @devlink_port: devlink port
  8544. *
8545. * Context: Takes and releases devlink->lock <mutex>.
  8546. */
  8547. void devlink_port_unregister(struct devlink_port *devlink_port)
  8548. {
  8549. struct devlink *devlink = devlink_port->devlink;
  8550. devl_lock(devlink);
  8551. devl_port_unregister(devlink_port);
  8552. devl_unlock(devlink);
  8553. }
  8554. EXPORT_SYMBOL_GPL(devlink_port_unregister);
  8555. static void __devlink_port_type_set(struct devlink_port *devlink_port,
  8556. enum devlink_port_type type,
  8557. void *type_dev)
  8558. {
  8559. ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
  8560. devlink_port_type_warn_cancel(devlink_port);
  8561. spin_lock_bh(&devlink_port->type_lock);
  8562. devlink_port->type = type;
  8563. devlink_port->type_dev = type_dev;
  8564. spin_unlock_bh(&devlink_port->type_lock);
  8565. devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
  8566. }
  8567. static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
  8568. struct net_device *netdev)
  8569. {
  8570. const struct net_device_ops *ops = netdev->netdev_ops;
  8571. /* If driver registers devlink port, it should set devlink port
  8572. * attributes accordingly so the compat functions are called
  8573. * and the original ops are not used.
  8574. */
  8575. if (ops->ndo_get_phys_port_name) {
8576. /* Some drivers use the same set of ndos for netdevs
8577. * that have a devlink_port registered and also for
8578. * those that don't. Make sure that ndo_get_phys_port_name
8579. * returns -EOPNOTSUPP here if it is defined.
8580. * Warn if not.
8581. */
  8582. char name[IFNAMSIZ];
  8583. int err;
  8584. err = ops->ndo_get_phys_port_name(netdev, name, sizeof(name));
  8585. WARN_ON(err != -EOPNOTSUPP);
  8586. }
  8587. if (ops->ndo_get_port_parent_id) {
8588. /* Some drivers use the same set of ndos for netdevs
8589. * that have a devlink_port registered and also for
8590. * those that don't. Make sure that ndo_get_port_parent_id
8591. * returns -EOPNOTSUPP here if it is defined.
8592. * Warn if not.
8593. */
  8594. struct netdev_phys_item_id ppid;
  8595. int err;
  8596. err = ops->ndo_get_port_parent_id(netdev, &ppid);
  8597. WARN_ON(err != -EOPNOTSUPP);
  8598. }
  8599. }
  8600. /**
  8601. * devlink_port_type_eth_set - Set port type to Ethernet
  8602. *
  8603. * @devlink_port: devlink port
  8604. * @netdev: related netdevice
  8605. */
  8606. void devlink_port_type_eth_set(struct devlink_port *devlink_port,
  8607. struct net_device *netdev)
  8608. {
  8609. if (netdev)
  8610. devlink_port_type_netdev_checks(devlink_port, netdev);
  8611. else
  8612. dev_warn(devlink_port->devlink->dev,
  8613. "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
  8614. devlink_port->index);
  8615. __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, netdev);
  8616. }
  8617. EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
  8618. /**
  8619. * devlink_port_type_ib_set - Set port type to InfiniBand
  8620. *
  8621. * @devlink_port: devlink port
  8622. * @ibdev: related IB device
  8623. */
  8624. void devlink_port_type_ib_set(struct devlink_port *devlink_port,
  8625. struct ib_device *ibdev)
  8626. {
  8627. __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_IB, ibdev);
  8628. }
  8629. EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
  8630. /**
  8631. * devlink_port_type_clear - Clear port type
  8632. *
  8633. * @devlink_port: devlink port
  8634. */
  8635. void devlink_port_type_clear(struct devlink_port *devlink_port)
  8636. {
  8637. __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
  8638. devlink_port_type_warn_schedule(devlink_port);
  8639. }
  8640. EXPORT_SYMBOL_GPL(devlink_port_type_clear);
  8641. static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
  8642. enum devlink_port_flavour flavour)
  8643. {
  8644. struct devlink_port_attrs *attrs = &devlink_port->attrs;
  8645. devlink_port->attrs_set = true;
  8646. attrs->flavour = flavour;
  8647. if (attrs->switch_id.id_len) {
  8648. devlink_port->switch_port = true;
  8649. if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN))
  8650. attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN;
  8651. } else {
  8652. devlink_port->switch_port = false;
  8653. }
  8654. return 0;
  8655. }
  8656. /**
  8657. * devlink_port_attrs_set - Set port attributes
  8658. *
  8659. * @devlink_port: devlink port
  8660. * @attrs: devlink port attrs
  8661. */
  8662. void devlink_port_attrs_set(struct devlink_port *devlink_port,
  8663. struct devlink_port_attrs *attrs)
  8664. {
  8665. int ret;
  8666. ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
  8667. devlink_port->attrs = *attrs;
  8668. ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
  8669. if (ret)
  8670. return;
  8671. WARN_ON(attrs->splittable && attrs->split);
  8672. }
  8673. EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
  8674. /**
  8675. * devlink_port_attrs_pci_pf_set - Set PCI PF port attributes
  8676. *
  8677. * @devlink_port: devlink port
  8678. * @controller: associated controller number for the devlink port instance
  8679. * @pf: associated PF for the devlink port instance
  8680. * @external: indicates if the port is for an external controller
  8681. */
  8682. void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
  8683. u16 pf, bool external)
  8684. {
  8685. struct devlink_port_attrs *attrs = &devlink_port->attrs;
  8686. int ret;
  8687. ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
  8688. ret = __devlink_port_attrs_set(devlink_port,
  8689. DEVLINK_PORT_FLAVOUR_PCI_PF);
  8690. if (ret)
  8691. return;
  8692. attrs->pci_pf.controller = controller;
  8693. attrs->pci_pf.pf = pf;
  8694. attrs->pci_pf.external = external;
  8695. }
  8696. EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
  8697. /**
  8698. * devlink_port_attrs_pci_vf_set - Set PCI VF port attributes
  8699. *
  8700. * @devlink_port: devlink port
  8701. * @controller: associated controller number for the devlink port instance
  8702. * @pf: associated PF for the devlink port instance
  8703. * @vf: associated VF of a PF for the devlink port instance
  8704. * @external: indicates if the port is for an external controller
  8705. */
  8706. void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
  8707. u16 pf, u16 vf, bool external)
  8708. {
  8709. struct devlink_port_attrs *attrs = &devlink_port->attrs;
  8710. int ret;
  8711. ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
  8712. ret = __devlink_port_attrs_set(devlink_port,
  8713. DEVLINK_PORT_FLAVOUR_PCI_VF);
  8714. if (ret)
  8715. return;
  8716. attrs->pci_vf.controller = controller;
  8717. attrs->pci_vf.pf = pf;
  8718. attrs->pci_vf.vf = vf;
  8719. attrs->pci_vf.external = external;
  8720. }
  8721. EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
  8722. /**
  8723. * devlink_port_attrs_pci_sf_set - Set PCI SF port attributes
  8724. *
  8725. * @devlink_port: devlink port
  8726. * @controller: associated controller number for the devlink port instance
  8727. * @pf: associated PF for the devlink port instance
  8728. * @sf: associated SF of a PF for the devlink port instance
  8729. * @external: indicates if the port is for an external controller
  8730. */
  8731. void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
  8732. u16 pf, u32 sf, bool external)
  8733. {
  8734. struct devlink_port_attrs *attrs = &devlink_port->attrs;
  8735. int ret;
  8736. ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
  8737. ret = __devlink_port_attrs_set(devlink_port,
  8738. DEVLINK_PORT_FLAVOUR_PCI_SF);
  8739. if (ret)
  8740. return;
  8741. attrs->pci_sf.controller = controller;
  8742. attrs->pci_sf.pf = pf;
  8743. attrs->pci_sf.sf = sf;
  8744. attrs->pci_sf.external = external;
  8745. }
  8746. EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
  8747. /**
  8748. * devl_rate_leaf_create - create devlink rate leaf
  8749. * @devlink_port: devlink port object to create rate object on
  8750. * @priv: driver private data
  8751. *
  8752. * Create devlink rate object of type leaf on provided @devlink_port.
  8753. */
  8754. int devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv)
  8755. {
  8756. struct devlink *devlink = devlink_port->devlink;
  8757. struct devlink_rate *devlink_rate;
  8758. devl_assert_locked(devlink_port->devlink);
  8759. if (WARN_ON(devlink_port->devlink_rate))
  8760. return -EBUSY;
  8761. devlink_rate = kzalloc(sizeof(*devlink_rate), GFP_KERNEL);
  8762. if (!devlink_rate)
  8763. return -ENOMEM;
  8764. devlink_rate->type = DEVLINK_RATE_TYPE_LEAF;
  8765. devlink_rate->devlink = devlink;
  8766. devlink_rate->devlink_port = devlink_port;
  8767. devlink_rate->priv = priv;
  8768. list_add_tail(&devlink_rate->list, &devlink->rate_list);
  8769. devlink_port->devlink_rate = devlink_rate;
  8770. devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
  8771. return 0;
  8772. }
  8773. EXPORT_SYMBOL_GPL(devl_rate_leaf_create);
  8774. /**
  8775. * devl_rate_leaf_destroy - destroy devlink rate leaf
  8776. *
  8777. * @devlink_port: devlink port linked to the rate object
  8778. *
  8779. * Destroy the devlink rate object of type leaf on provided @devlink_port.
  8780. */
  8781. void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
  8782. {
  8783. struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
  8784. devl_assert_locked(devlink_port->devlink);
  8785. if (!devlink_rate)
  8786. return;
  8787. devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
  8788. if (devlink_rate->parent)
  8789. refcount_dec(&devlink_rate->parent->refcnt);
  8790. list_del(&devlink_rate->list);
  8791. devlink_port->devlink_rate = NULL;
  8792. kfree(devlink_rate);
  8793. }
  8794. EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
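/*
 * Illustrative sketch (not part of the devlink core): creating and destroying
 * a rate leaf for an already registered port. Both devl_* calls require the
 * instance lock, taken here via devl_lock()/devl_unlock(). "foo_*" names are
 * placeholders.
 */
static int foo_port_rate_init(struct devlink_port *dl_port, void *driver_priv)
{
	struct devlink *devlink = dl_port->devlink;
	int err;

	devl_lock(devlink);
	err = devl_rate_leaf_create(dl_port, driver_priv);
	devl_unlock(devlink);
	return err;
}

static void foo_port_rate_fini(struct devlink_port *dl_port)
{
	struct devlink *devlink = dl_port->devlink;

	devl_lock(devlink);
	devl_rate_leaf_destroy(dl_port);
	devl_unlock(devlink);
}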
  8795. /**
  8796. * devl_rate_nodes_destroy - destroy all devlink rate nodes on device
  8797. * @devlink: devlink instance
  8798. *
  8799. * Unset parent for all rate objects and destroy all rate nodes
  8800. * on specified device.
  8801. */
  8802. void devl_rate_nodes_destroy(struct devlink *devlink)
  8803. {
8804. struct devlink_rate *devlink_rate, *tmp;
  8805. const struct devlink_ops *ops = devlink->ops;
  8806. devl_assert_locked(devlink);
  8807. list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
  8808. if (!devlink_rate->parent)
  8809. continue;
  8810. refcount_dec(&devlink_rate->parent->refcnt);
  8811. if (devlink_rate_is_leaf(devlink_rate))
  8812. ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
  8813. NULL, NULL);
  8814. else if (devlink_rate_is_node(devlink_rate))
  8815. ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
  8816. NULL, NULL);
  8817. }
  8818. list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
  8819. if (devlink_rate_is_node(devlink_rate)) {
  8820. ops->rate_node_del(devlink_rate, devlink_rate->priv, NULL);
  8821. list_del(&devlink_rate->list);
  8822. kfree(devlink_rate->name);
  8823. kfree(devlink_rate);
  8824. }
  8825. }
  8826. }
  8827. EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
  8828. /**
  8829. * devlink_port_linecard_set - Link port with a linecard
  8830. *
  8831. * @devlink_port: devlink port
  8832. * @linecard: devlink linecard
  8833. */
  8834. void devlink_port_linecard_set(struct devlink_port *devlink_port,
  8835. struct devlink_linecard *linecard)
  8836. {
  8837. ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
  8838. devlink_port->linecard = linecard;
  8839. }
  8840. EXPORT_SYMBOL_GPL(devlink_port_linecard_set);
  8841. static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
  8842. char *name, size_t len)
  8843. {
  8844. struct devlink_port_attrs *attrs = &devlink_port->attrs;
  8845. int n = 0;
  8846. if (!devlink_port->attrs_set)
  8847. return -EOPNOTSUPP;
  8848. switch (attrs->flavour) {
  8849. case DEVLINK_PORT_FLAVOUR_PHYSICAL:
  8850. if (devlink_port->linecard)
  8851. n = snprintf(name, len, "l%u",
  8852. devlink_port->linecard->index);
  8853. if (n < len)
  8854. n += snprintf(name + n, len - n, "p%u",
  8855. attrs->phys.port_number);
  8856. if (n < len && attrs->split)
  8857. n += snprintf(name + n, len - n, "s%u",
  8858. attrs->phys.split_subport_number);
  8859. break;
  8860. case DEVLINK_PORT_FLAVOUR_CPU:
  8861. case DEVLINK_PORT_FLAVOUR_DSA:
  8862. case DEVLINK_PORT_FLAVOUR_UNUSED:
8863. /* As CPU and DSA ports do not have a netdevice associated
8864. * with them, this case should never happen.
8865. */
  8866. WARN_ON(1);
  8867. return -EINVAL;
  8868. case DEVLINK_PORT_FLAVOUR_PCI_PF:
  8869. if (attrs->pci_pf.external) {
  8870. n = snprintf(name, len, "c%u", attrs->pci_pf.controller);
  8871. if (n >= len)
  8872. return -EINVAL;
  8873. len -= n;
  8874. name += n;
  8875. }
  8876. n = snprintf(name, len, "pf%u", attrs->pci_pf.pf);
  8877. break;
  8878. case DEVLINK_PORT_FLAVOUR_PCI_VF:
  8879. if (attrs->pci_vf.external) {
  8880. n = snprintf(name, len, "c%u", attrs->pci_vf.controller);
  8881. if (n >= len)
  8882. return -EINVAL;
  8883. len -= n;
  8884. name += n;
  8885. }
  8886. n = snprintf(name, len, "pf%uvf%u",
  8887. attrs->pci_vf.pf, attrs->pci_vf.vf);
  8888. break;
  8889. case DEVLINK_PORT_FLAVOUR_PCI_SF:
  8890. if (attrs->pci_sf.external) {
  8891. n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
  8892. if (n >= len)
  8893. return -EINVAL;
  8894. len -= n;
  8895. name += n;
  8896. }
  8897. n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
  8898. attrs->pci_sf.sf);
  8899. break;
  8900. case DEVLINK_PORT_FLAVOUR_VIRTUAL:
  8901. return -EOPNOTSUPP;
  8902. }
  8903. if (n >= len)
  8904. return -EINVAL;
  8905. return 0;
  8906. }
  8907. static int devlink_linecard_types_init(struct devlink_linecard *linecard)
  8908. {
  8909. struct devlink_linecard_type *linecard_type;
  8910. unsigned int count;
  8911. int i;
  8912. count = linecard->ops->types_count(linecard, linecard->priv);
  8913. linecard->types = kmalloc_array(count, sizeof(*linecard_type),
  8914. GFP_KERNEL);
  8915. if (!linecard->types)
  8916. return -ENOMEM;
  8917. linecard->types_count = count;
  8918. for (i = 0; i < count; i++) {
  8919. linecard_type = &linecard->types[i];
  8920. linecard->ops->types_get(linecard, linecard->priv, i,
  8921. &linecard_type->type,
  8922. &linecard_type->priv);
  8923. }
  8924. return 0;
  8925. }
  8926. static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
  8927. {
  8928. kfree(linecard->types);
  8929. }
  8930. /**
  8931. * devlink_linecard_create - Create devlink linecard
  8932. *
  8933. * @devlink: devlink
  8934. * @linecard_index: driver-specific numerical identifier of the linecard
  8935. * @ops: linecards ops
  8936. * @priv: user priv pointer
  8937. *
  8938. * Create devlink linecard instance with provided linecard index.
8939. * The caller can use any indexing scheme, even a hardware-related one.
  8940. *
  8941. * Return: Line card structure or an ERR_PTR() encoded error code.
  8942. */
  8943. struct devlink_linecard *
  8944. devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index,
  8945. const struct devlink_linecard_ops *ops, void *priv)
  8946. {
  8947. struct devlink_linecard *linecard;
  8948. int err;
  8949. if (WARN_ON(!ops || !ops->provision || !ops->unprovision ||
  8950. !ops->types_count || !ops->types_get))
  8951. return ERR_PTR(-EINVAL);
  8952. mutex_lock(&devlink->linecards_lock);
  8953. if (devlink_linecard_index_exists(devlink, linecard_index)) {
  8954. mutex_unlock(&devlink->linecards_lock);
  8955. return ERR_PTR(-EEXIST);
  8956. }
  8957. linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
  8958. if (!linecard) {
  8959. mutex_unlock(&devlink->linecards_lock);
  8960. return ERR_PTR(-ENOMEM);
  8961. }
  8962. linecard->devlink = devlink;
  8963. linecard->index = linecard_index;
  8964. linecard->ops = ops;
  8965. linecard->priv = priv;
  8966. linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
  8967. mutex_init(&linecard->state_lock);
  8968. err = devlink_linecard_types_init(linecard);
  8969. if (err) {
  8970. mutex_destroy(&linecard->state_lock);
  8971. kfree(linecard);
  8972. mutex_unlock(&devlink->linecards_lock);
  8973. return ERR_PTR(err);
  8974. }
  8975. list_add_tail(&linecard->list, &devlink->linecard_list);
  8976. refcount_set(&linecard->refcount, 1);
  8977. mutex_unlock(&devlink->linecards_lock);
  8978. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  8979. return linecard;
  8980. }
  8981. EXPORT_SYMBOL_GPL(devlink_linecard_create);
  8982. /**
  8983. * devlink_linecard_destroy - Destroy devlink linecard
  8984. *
  8985. * @linecard: devlink linecard
  8986. */
  8987. void devlink_linecard_destroy(struct devlink_linecard *linecard)
  8988. {
  8989. struct devlink *devlink = linecard->devlink;
  8990. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
  8991. mutex_lock(&devlink->linecards_lock);
  8992. list_del(&linecard->list);
  8993. devlink_linecard_types_fini(linecard);
  8994. mutex_unlock(&devlink->linecards_lock);
  8995. devlink_linecard_put(linecard);
  8996. }
  8997. EXPORT_SYMBOL_GPL(devlink_linecard_destroy);
  8998. /**
  8999. * devlink_linecard_provision_set - Set provisioning on linecard
  9000. *
  9001. * @linecard: devlink linecard
  9002. * @type: linecard type
  9003. *
9004. * This is called either directly from within the provision() op or
9005. * asynchronously as a result of that op.
  9006. */
  9007. void devlink_linecard_provision_set(struct devlink_linecard *linecard,
  9008. const char *type)
  9009. {
  9010. mutex_lock(&linecard->state_lock);
  9011. WARN_ON(linecard->type && strcmp(linecard->type, type));
  9012. linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
  9013. linecard->type = type;
  9014. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  9015. mutex_unlock(&linecard->state_lock);
  9016. }
  9017. EXPORT_SYMBOL_GPL(devlink_linecard_provision_set);
  9018. /**
  9019. * devlink_linecard_provision_clear - Clear provisioning on linecard
  9020. *
  9021. * @linecard: devlink linecard
  9022. *
9023. * This is called either directly from within the unprovision() op or
9024. * asynchronously as a result of that op.
  9025. */
  9026. void devlink_linecard_provision_clear(struct devlink_linecard *linecard)
  9027. {
  9028. mutex_lock(&linecard->state_lock);
  9029. WARN_ON(linecard->nested_devlink);
  9030. linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
  9031. linecard->type = NULL;
  9032. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  9033. mutex_unlock(&linecard->state_lock);
  9034. }
  9035. EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear);
  9036. /**
  9037. * devlink_linecard_provision_fail - Fail provisioning on linecard
  9038. *
  9039. * @linecard: devlink linecard
  9040. *
9041. * This is called either directly from within the provision() op or
9042. * asynchronously as a result of that op.
  9043. */
  9044. void devlink_linecard_provision_fail(struct devlink_linecard *linecard)
  9045. {
  9046. mutex_lock(&linecard->state_lock);
  9047. WARN_ON(linecard->nested_devlink);
  9048. linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED;
  9049. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  9050. mutex_unlock(&linecard->state_lock);
  9051. }
  9052. EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail);
  9053. /**
  9054. * devlink_linecard_activate - Set linecard active
  9055. *
  9056. * @linecard: devlink linecard
  9057. */
  9058. void devlink_linecard_activate(struct devlink_linecard *linecard)
  9059. {
  9060. mutex_lock(&linecard->state_lock);
  9061. WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED);
  9062. linecard->state = DEVLINK_LINECARD_STATE_ACTIVE;
  9063. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  9064. mutex_unlock(&linecard->state_lock);
  9065. }
  9066. EXPORT_SYMBOL_GPL(devlink_linecard_activate);
  9067. /**
  9068. * devlink_linecard_deactivate - Set linecard inactive
  9069. *
  9070. * @linecard: devlink linecard
  9071. */
  9072. void devlink_linecard_deactivate(struct devlink_linecard *linecard)
  9073. {
  9074. mutex_lock(&linecard->state_lock);
  9075. switch (linecard->state) {
  9076. case DEVLINK_LINECARD_STATE_ACTIVE:
  9077. linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
  9078. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  9079. break;
  9080. case DEVLINK_LINECARD_STATE_UNPROVISIONING:
  9081. /* Line card is being deactivated as part
  9082. * of unprovisioning flow.
  9083. */
  9084. break;
  9085. default:
  9086. WARN_ON(1);
  9087. break;
  9088. }
  9089. mutex_unlock(&linecard->state_lock);
  9090. }
  9091. EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
  9092. /**
  9093. * devlink_linecard_nested_dl_set - Attach/detach nested devlink
  9094. * instance to linecard.
  9095. *
  9096. * @linecard: devlink linecard
  9097. * @nested_devlink: devlink instance to attach or NULL to detach
  9098. */
  9099. void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
  9100. struct devlink *nested_devlink)
  9101. {
  9102. mutex_lock(&linecard->state_lock);
  9103. linecard->nested_devlink = nested_devlink;
  9104. devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
  9105. mutex_unlock(&linecard->state_lock);
  9106. }
  9107. EXPORT_SYMBOL_GPL(devlink_linecard_nested_dl_set);
  9108. int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
  9109. u32 size, u16 ingress_pools_count,
  9110. u16 egress_pools_count, u16 ingress_tc_count,
  9111. u16 egress_tc_count)
  9112. {
  9113. struct devlink_sb *devlink_sb;
  9114. lockdep_assert_held(&devlink->lock);
  9115. if (devlink_sb_index_exists(devlink, sb_index))
  9116. return -EEXIST;
  9117. devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
  9118. if (!devlink_sb)
  9119. return -ENOMEM;
  9120. devlink_sb->index = sb_index;
  9121. devlink_sb->size = size;
  9122. devlink_sb->ingress_pools_count = ingress_pools_count;
  9123. devlink_sb->egress_pools_count = egress_pools_count;
  9124. devlink_sb->ingress_tc_count = ingress_tc_count;
  9125. devlink_sb->egress_tc_count = egress_tc_count;
  9126. list_add_tail(&devlink_sb->list, &devlink->sb_list);
  9127. return 0;
  9128. }
  9129. EXPORT_SYMBOL_GPL(devl_sb_register);
  9130. int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
  9131. u32 size, u16 ingress_pools_count,
  9132. u16 egress_pools_count, u16 ingress_tc_count,
  9133. u16 egress_tc_count)
  9134. {
  9135. int err;
  9136. devl_lock(devlink);
  9137. err = devl_sb_register(devlink, sb_index, size, ingress_pools_count,
  9138. egress_pools_count, ingress_tc_count,
  9139. egress_tc_count);
  9140. devl_unlock(devlink);
  9141. return err;
  9142. }
  9143. EXPORT_SYMBOL_GPL(devlink_sb_register);
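/*
 * Illustrative sketch (not part of the devlink core): registering one shared
 * buffer through the unlocked wrapper above. The geometry numbers are
 * arbitrary placeholders, not recommendations; "foo_*" names are hypothetical.
 */
static int foo_sb_init(struct devlink *devlink)
{
	/* sb index 0, 16 MB total, 4 ingress/4 egress pools, 8 TCs each way */
	return devlink_sb_register(devlink, 0, 16 * 1024 * 1024, 4, 4, 8, 8);
}

static void foo_sb_fini(struct devlink *devlink)
{
	devlink_sb_unregister(devlink, 0);
}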
  9144. void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index)
  9145. {
  9146. struct devlink_sb *devlink_sb;
  9147. lockdep_assert_held(&devlink->lock);
  9148. devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
  9149. WARN_ON(!devlink_sb);
  9150. list_del(&devlink_sb->list);
  9151. kfree(devlink_sb);
  9152. }
  9153. EXPORT_SYMBOL_GPL(devl_sb_unregister);
  9154. void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
  9155. {
  9156. devl_lock(devlink);
  9157. devl_sb_unregister(devlink, sb_index);
  9158. devl_unlock(devlink);
  9159. }
  9160. EXPORT_SYMBOL_GPL(devlink_sb_unregister);
  9161. /**
  9162. * devl_dpipe_headers_register - register dpipe headers
  9163. *
  9164. * @devlink: devlink
  9165. * @dpipe_headers: dpipe header array
  9166. *
  9167. * Register the headers supported by hardware.
  9168. */
  9169. void devl_dpipe_headers_register(struct devlink *devlink,
  9170. struct devlink_dpipe_headers *dpipe_headers)
  9171. {
  9172. lockdep_assert_held(&devlink->lock);
  9173. devlink->dpipe_headers = dpipe_headers;
  9174. }
  9175. EXPORT_SYMBOL_GPL(devl_dpipe_headers_register);
  9176. /**
  9177. * devl_dpipe_headers_unregister - unregister dpipe headers
  9178. *
  9179. * @devlink: devlink
  9180. *
  9181. * Unregister the headers supported by hardware.
  9182. */
  9183. void devl_dpipe_headers_unregister(struct devlink *devlink)
  9184. {
  9185. lockdep_assert_held(&devlink->lock);
  9186. devlink->dpipe_headers = NULL;
  9187. }
  9188. EXPORT_SYMBOL_GPL(devl_dpipe_headers_unregister);
  9189. /**
  9190. * devlink_dpipe_table_counter_enabled - check if counter allocation
  9191. * required
  9192. * @devlink: devlink
9193. * @table_name: table name
  9194. *
9195. * Used by drivers to check if counter allocation is required.
9196. * After counter allocation is turned on, the table entries
9197. * are updated to include counter statistics.
9198. *
9199. * From that point on, the driver must respect the counter
9200. * state so that each entry added to the table is added
9201. * with a counter.
  9202. */
  9203. bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
  9204. const char *table_name)
  9205. {
  9206. struct devlink_dpipe_table *table;
  9207. bool enabled;
  9208. rcu_read_lock();
  9209. table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
  9210. table_name, devlink);
  9211. enabled = false;
  9212. if (table)
  9213. enabled = table->counters_enabled;
  9214. rcu_read_unlock();
  9215. return enabled;
  9216. }
  9217. EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
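/*
 * Illustrative sketch (not part of the devlink core): a driver honouring the
 * counter state documented above when programming a new table entry. The
 * foo_* names, struct and stub are hypothetical placeholders.
 */
struct foo_entry {
	bool counter_valid;
};

static int foo_hw_entry_program(struct foo_entry *entry)
{
	return 0;	/* stub: program the entry into hardware */
}

static int foo_table_entry_add(struct devlink *devlink, struct foo_entry *entry)
{
	/* Once counters are enabled for the table, every entry added from
	 * that point on must carry one.
	 */
	entry->counter_valid =
		devlink_dpipe_table_counter_enabled(devlink, "foo_table");

	return foo_hw_entry_program(entry);
}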
  9218. /**
  9219. * devl_dpipe_table_register - register dpipe table
  9220. *
  9221. * @devlink: devlink
  9222. * @table_name: table name
  9223. * @table_ops: table ops
  9224. * @priv: priv
  9225. * @counter_control_extern: external control for counters
  9226. */
  9227. int devl_dpipe_table_register(struct devlink *devlink,
  9228. const char *table_name,
  9229. struct devlink_dpipe_table_ops *table_ops,
  9230. void *priv, bool counter_control_extern)
  9231. {
  9232. struct devlink_dpipe_table *table;
  9233. lockdep_assert_held(&devlink->lock);
  9234. if (WARN_ON(!table_ops->size_get))
  9235. return -EINVAL;
  9236. if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
  9237. devlink))
  9238. return -EEXIST;
  9239. table = kzalloc(sizeof(*table), GFP_KERNEL);
  9240. if (!table)
  9241. return -ENOMEM;
  9242. table->name = table_name;
  9243. table->table_ops = table_ops;
  9244. table->priv = priv;
  9245. table->counter_control_extern = counter_control_extern;
  9246. list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
  9247. return 0;
  9248. }
  9249. EXPORT_SYMBOL_GPL(devl_dpipe_table_register);
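/*
 * Illustrative sketch (not part of the devlink core): registering a dpipe
 * table under the instance lock. Only the mandatory size_get callback is
 * filled in; the remaining devlink_dpipe_table_ops callbacks are omitted for
 * brevity. "foo_*" names are placeholders.
 */
static u64 foo_table_size_get(void *priv)
{
	return 1024;	/* stub: report the table capacity */
}

static struct devlink_dpipe_table_ops foo_table_ops = {
	.size_get = foo_table_size_get,
};

static int foo_dpipe_init(struct devlink *devlink, void *priv)
{
	int err;

	devl_lock(devlink);
	err = devl_dpipe_table_register(devlink, "foo_table", &foo_table_ops,
					priv, false);
	devl_unlock(devlink);
	return err;
}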
  9250. /**
  9251. * devl_dpipe_table_unregister - unregister dpipe table
  9252. *
  9253. * @devlink: devlink
  9254. * @table_name: table name
  9255. */
  9256. void devl_dpipe_table_unregister(struct devlink *devlink,
  9257. const char *table_name)
  9258. {
  9259. struct devlink_dpipe_table *table;
  9260. lockdep_assert_held(&devlink->lock);
  9261. table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
  9262. table_name, devlink);
  9263. if (!table)
  9264. return;
  9265. list_del_rcu(&table->list);
  9266. kfree_rcu(table, rcu);
  9267. }
  9268. EXPORT_SYMBOL_GPL(devl_dpipe_table_unregister);
  9269. /**
  9270. * devl_resource_register - devlink resource register
  9271. *
  9272. * @devlink: devlink
  9273. * @resource_name: resource's name
  9274. * @resource_size: resource's size
  9275. * @resource_id: resource's id
  9276. * @parent_resource_id: resource's parent id
  9277. * @size_params: size parameters
  9278. *
  9279. * Generic resources should reuse the same names across drivers.
  9280. * Please see the generic resources list at:
  9281. * Documentation/networking/devlink/devlink-resource.rst
  9282. */
  9283. int devl_resource_register(struct devlink *devlink,
  9284. const char *resource_name,
  9285. u64 resource_size,
  9286. u64 resource_id,
  9287. u64 parent_resource_id,
  9288. const struct devlink_resource_size_params *size_params)
  9289. {
  9290. struct devlink_resource *resource;
  9291. struct list_head *resource_list;
  9292. bool top_hierarchy;
  9293. lockdep_assert_held(&devlink->lock);
  9294. top_hierarchy = parent_resource_id == DEVLINK_RESOURCE_ID_PARENT_TOP;
  9295. resource = devlink_resource_find(devlink, NULL, resource_id);
  9296. if (resource)
  9297. return -EINVAL;
  9298. resource = kzalloc(sizeof(*resource), GFP_KERNEL);
  9299. if (!resource)
  9300. return -ENOMEM;
  9301. if (top_hierarchy) {
  9302. resource_list = &devlink->resource_list;
  9303. } else {
  9304. struct devlink_resource *parent_resource;
  9305. parent_resource = devlink_resource_find(devlink, NULL,
  9306. parent_resource_id);
  9307. if (parent_resource) {
  9308. resource_list = &parent_resource->resource_list;
  9309. resource->parent = parent_resource;
  9310. } else {
  9311. kfree(resource);
  9312. return -EINVAL;
  9313. }
  9314. }
  9315. resource->name = resource_name;
  9316. resource->size = resource_size;
  9317. resource->size_new = resource_size;
  9318. resource->id = resource_id;
  9319. resource->size_valid = true;
  9320. memcpy(&resource->size_params, size_params,
  9321. sizeof(resource->size_params));
  9322. INIT_LIST_HEAD(&resource->resource_list);
  9323. list_add_tail(&resource->list, resource_list);
  9324. return 0;
  9325. }
  9326. EXPORT_SYMBOL_GPL(devl_resource_register);
  9327. /**
  9328. * devlink_resource_register - devlink resource register
  9329. *
  9330. * @devlink: devlink
  9331. * @resource_name: resource's name
  9332. * @resource_size: resource's size
  9333. * @resource_id: resource's id
  9334. * @parent_resource_id: resource's parent id
  9335. * @size_params: size parameters
  9336. *
  9337. * Generic resources should reuse the same names across drivers.
  9338. * Please see the generic resources list at:
  9339. * Documentation/networking/devlink/devlink-resource.rst
  9340. *
9341. * Context: Takes and releases devlink->lock <mutex>.
  9342. */
  9343. int devlink_resource_register(struct devlink *devlink,
  9344. const char *resource_name,
  9345. u64 resource_size,
  9346. u64 resource_id,
  9347. u64 parent_resource_id,
  9348. const struct devlink_resource_size_params *size_params)
  9349. {
  9350. int err;
  9351. devl_lock(devlink);
  9352. err = devl_resource_register(devlink, resource_name, resource_size,
  9353. resource_id, parent_resource_id, size_params);
  9354. devl_unlock(devlink);
  9355. return err;
  9356. }
  9357. EXPORT_SYMBOL_GPL(devlink_resource_register);
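/*
 * Illustrative sketch (not part of the devlink core): registering a top-level
 * resource through the unlocked wrapper above. The resource id, name and
 * sizes are arbitrary placeholders; the sketch assumes the
 * devlink_resource_size_params_init() helper and DEVLINK_RESOURCE_UNIT_ENTRY
 * from <net/devlink.h>.
 */
#define FOO_RESOURCE_ID_KVD	1

static int foo_resources_register(struct devlink *devlink)
{
	struct devlink_resource_size_params params;

	/* min 1K entries, max 64K entries, adjustable in steps of 1K */
	devlink_resource_size_params_init(&params, 1024, 65536, 1024,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devlink_resource_register(devlink, "kvd", 32768,
					 FOO_RESOURCE_ID_KVD,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &params);
}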
  9358. static void devlink_resource_unregister(struct devlink *devlink,
  9359. struct devlink_resource *resource)
  9360. {
  9361. struct devlink_resource *tmp, *child_resource;
  9362. list_for_each_entry_safe(child_resource, tmp, &resource->resource_list,
  9363. list) {
  9364. devlink_resource_unregister(devlink, child_resource);
  9365. list_del(&child_resource->list);
  9366. kfree(child_resource);
  9367. }
  9368. }
  9369. /**
  9370. * devl_resources_unregister - free all resources
  9371. *
  9372. * @devlink: devlink
  9373. */
  9374. void devl_resources_unregister(struct devlink *devlink)
  9375. {
  9376. struct devlink_resource *tmp, *child_resource;
  9377. lockdep_assert_held(&devlink->lock);
  9378. list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list,
  9379. list) {
  9380. devlink_resource_unregister(devlink, child_resource);
  9381. list_del(&child_resource->list);
  9382. kfree(child_resource);
  9383. }
  9384. }
  9385. EXPORT_SYMBOL_GPL(devl_resources_unregister);
  9386. /**
  9387. * devlink_resources_unregister - free all resources
  9388. *
  9389. * @devlink: devlink
  9390. *
9391. * Context: Takes and releases devlink->lock <mutex>.
  9392. */
  9393. void devlink_resources_unregister(struct devlink *devlink)
  9394. {
  9395. devl_lock(devlink);
  9396. devl_resources_unregister(devlink);
  9397. devl_unlock(devlink);
  9398. }
  9399. EXPORT_SYMBOL_GPL(devlink_resources_unregister);
  9400. /**
  9401. * devl_resource_size_get - get and update size
  9402. *
  9403. * @devlink: devlink
  9404. * @resource_id: the requested resource id
  9405. * @p_resource_size: ptr to update
  9406. */
  9407. int devl_resource_size_get(struct devlink *devlink,
  9408. u64 resource_id,
  9409. u64 *p_resource_size)
  9410. {
  9411. struct devlink_resource *resource;
  9412. lockdep_assert_held(&devlink->lock);
  9413. resource = devlink_resource_find(devlink, NULL, resource_id);
  9414. if (!resource)
  9415. return -EINVAL;
  9416. *p_resource_size = resource->size_new;
  9417. resource->size = resource->size_new;
  9418. return 0;
  9419. }
  9420. EXPORT_SYMBOL_GPL(devl_resource_size_get);
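/*
 * Example (illustrative sketch only): during (re)initialization a driver can
 * pick up the size that userspace requested via "devlink resource set".
 * Must be called with devlink->lock held; the "foo" names reuse the
 * hypothetical resource ID from the sketch above.
 *
 *	static int foo_host_table_init(struct devlink *devlink)
 *	{
 *		u64 size;
 *		int err;
 *
 *		err = devl_resource_size_get(devlink,
 *					     FOO_RESOURCE_ID_HOST_TABLE,
 *					     &size);
 *		if (err)
 *			return err;
 *		// ... size the hardware table to "size" entries ...
 *		return 0;
 *	}
 */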
  9421. /**
  9422. * devl_dpipe_table_resource_set - set the resource id
  9423. *
  9424. * @devlink: devlink
  9425. * @table_name: table name
  9426. * @resource_id: resource id
  9427. * @resource_units: number of resource's units consumed per table's entry
  9428. */
  9429. int devl_dpipe_table_resource_set(struct devlink *devlink,
  9430. const char *table_name, u64 resource_id,
  9431. u64 resource_units)
  9432. {
  9433. struct devlink_dpipe_table *table;
  9434. table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
  9435. table_name, devlink);
  9436. if (!table)
  9437. return -EINVAL;
  9438. table->resource_id = resource_id;
  9439. table->resource_units = resource_units;
  9440. table->resource_valid = true;
  9441. return 0;
  9442. }
  9443. EXPORT_SYMBOL_GPL(devl_dpipe_table_resource_set);
  9444. /**
  9445. * devl_resource_occ_get_register - register occupancy getter
  9446. *
  9447. * @devlink: devlink
  9448. * @resource_id: resource id
  9449. * @occ_get: occupancy getter callback
  9450. * @occ_get_priv: occupancy getter callback priv
  9451. */
  9452. void devl_resource_occ_get_register(struct devlink *devlink,
  9453. u64 resource_id,
  9454. devlink_resource_occ_get_t *occ_get,
  9455. void *occ_get_priv)
  9456. {
  9457. struct devlink_resource *resource;
  9458. lockdep_assert_held(&devlink->lock);
  9459. resource = devlink_resource_find(devlink, NULL, resource_id);
  9460. if (WARN_ON(!resource))
  9461. return;
  9462. WARN_ON(resource->occ_get);
  9463. resource->occ_get = occ_get;
  9464. resource->occ_get_priv = occ_get_priv;
  9465. }
  9466. EXPORT_SYMBOL_GPL(devl_resource_occ_get_register);
  9467. /**
  9468. * devlink_resource_occ_get_register - register occupancy getter
  9469. *
  9470. * @devlink: devlink
  9471. * @resource_id: resource id
  9472. * @occ_get: occupancy getter callback
  9473. * @occ_get_priv: occupancy getter callback priv
  9474. *
9475. * Context: Takes and releases the devlink->lock mutex.
  9476. */
  9477. void devlink_resource_occ_get_register(struct devlink *devlink,
  9478. u64 resource_id,
  9479. devlink_resource_occ_get_t *occ_get,
  9480. void *occ_get_priv)
  9481. {
  9482. devl_lock(devlink);
  9483. devl_resource_occ_get_register(devlink, resource_id,
  9484. occ_get, occ_get_priv);
  9485. devl_unlock(devlink);
  9486. }
  9487. EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
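/*
 * Example (illustrative sketch only): exposing an occupancy getter so that
 * "devlink resource show" can report how much of a resource is in use.
 * "struct foo" and its field are hypothetical driver state.
 *
 *	struct foo {
 *		u64 host_table_used;
 *	};
 *
 *	static u64 foo_host_table_occ_get(void *priv)
 *	{
 *		struct foo *foo = priv;
 *
 *		return foo->host_table_used;
 *	}
 *
 *	// After registering the resource:
 *	devlink_resource_occ_get_register(devlink,
 *					  FOO_RESOURCE_ID_HOST_TABLE,
 *					  foo_host_table_occ_get, foo);
 */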
  9488. /**
  9489. * devl_resource_occ_get_unregister - unregister occupancy getter
  9490. *
  9491. * @devlink: devlink
  9492. * @resource_id: resource id
  9493. */
  9494. void devl_resource_occ_get_unregister(struct devlink *devlink,
  9495. u64 resource_id)
  9496. {
  9497. struct devlink_resource *resource;
  9498. lockdep_assert_held(&devlink->lock);
  9499. resource = devlink_resource_find(devlink, NULL, resource_id);
  9500. if (WARN_ON(!resource))
  9501. return;
  9502. WARN_ON(!resource->occ_get);
  9503. resource->occ_get = NULL;
  9504. resource->occ_get_priv = NULL;
  9505. }
  9506. EXPORT_SYMBOL_GPL(devl_resource_occ_get_unregister);
  9507. /**
  9508. * devlink_resource_occ_get_unregister - unregister occupancy getter
  9509. *
  9510. * @devlink: devlink
  9511. * @resource_id: resource id
  9512. *
9513. * Context: Takes and releases the devlink->lock mutex.
  9514. */
  9515. void devlink_resource_occ_get_unregister(struct devlink *devlink,
  9516. u64 resource_id)
  9517. {
  9518. devl_lock(devlink);
  9519. devl_resource_occ_get_unregister(devlink, resource_id);
  9520. devl_unlock(devlink);
  9521. }
  9522. EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
  9523. static int devlink_param_verify(const struct devlink_param *param)
  9524. {
  9525. if (!param || !param->name || !param->supported_cmodes)
  9526. return -EINVAL;
  9527. if (param->generic)
  9528. return devlink_param_generic_verify(param);
  9529. else
  9530. return devlink_param_driver_verify(param);
  9531. }
  9532. /**
  9533. * devlink_params_register - register configuration parameters
  9534. *
  9535. * @devlink: devlink
  9536. * @params: configuration parameters array
  9537. * @params_count: number of parameters provided
  9538. *
  9539. * Register the configuration parameters supported by the driver.
  9540. */
  9541. int devlink_params_register(struct devlink *devlink,
  9542. const struct devlink_param *params,
  9543. size_t params_count)
  9544. {
  9545. const struct devlink_param *param = params;
  9546. int i, err;
  9547. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  9548. for (i = 0; i < params_count; i++, param++) {
  9549. err = devlink_param_register(devlink, param);
  9550. if (err)
  9551. goto rollback;
  9552. }
  9553. return 0;
  9554. rollback:
  9555. if (!i)
  9556. return err;
  9557. for (param--; i > 0; i--, param--)
  9558. devlink_param_unregister(devlink, param);
  9559. return err;
  9560. }
  9561. EXPORT_SYMBOL_GPL(devlink_params_register);
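/*
 * Example (illustrative sketch only): a driver-side parameter table combining
 * a generic parameter with a driver-specific one, registered before
 * devlink_register(). The "foo" names are hypothetical. Parameters that
 * support only the driverinit cmode must not provide get/set callbacks
 * (see devlink_param_register() below).
 *
 *	enum {
 *		FOO_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
 *		FOO_DEVLINK_PARAM_ID_ACL_REGIONS,
 *	};
 *
 *	static const struct devlink_param foo_devlink_params[] = {
 *		DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
 *				      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
 *				      NULL, NULL, NULL),
 *		DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_ACL_REGIONS,
 *				     "acl_regions", DEVLINK_PARAM_TYPE_U32,
 *				     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
 *				     NULL, NULL, NULL),
 *	};
 *
 *	err = devlink_params_register(devlink, foo_devlink_params,
 *				      ARRAY_SIZE(foo_devlink_params));
 */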
  9562. /**
  9563. * devlink_params_unregister - unregister configuration parameters
  9564. * @devlink: devlink
  9565. * @params: configuration parameters to unregister
  9566. * @params_count: number of parameters provided
  9567. */
  9568. void devlink_params_unregister(struct devlink *devlink,
  9569. const struct devlink_param *params,
  9570. size_t params_count)
  9571. {
  9572. const struct devlink_param *param = params;
  9573. int i;
  9574. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  9575. for (i = 0; i < params_count; i++, param++)
  9576. devlink_param_unregister(devlink, param);
  9577. }
  9578. EXPORT_SYMBOL_GPL(devlink_params_unregister);
  9579. /**
  9580. * devlink_param_register - register one configuration parameter
  9581. *
  9582. * @devlink: devlink
  9583. * @param: one configuration parameter
  9584. *
  9585. * Register the configuration parameter supported by the driver.
9586. * Return: 0 on successful registration or a negative error code otherwise.
  9587. */
  9588. int devlink_param_register(struct devlink *devlink,
  9589. const struct devlink_param *param)
  9590. {
  9591. struct devlink_param_item *param_item;
  9592. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  9593. WARN_ON(devlink_param_verify(param));
  9594. WARN_ON(devlink_param_find_by_name(&devlink->param_list, param->name));
  9595. if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
  9596. WARN_ON(param->get || param->set);
  9597. else
  9598. WARN_ON(!param->get || !param->set);
  9599. param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
  9600. if (!param_item)
  9601. return -ENOMEM;
  9602. param_item->param = param;
  9603. list_add_tail(&param_item->list, &devlink->param_list);
  9604. return 0;
  9605. }
  9606. EXPORT_SYMBOL_GPL(devlink_param_register);
  9607. /**
  9608. * devlink_param_unregister - unregister one configuration parameter
  9609. * @devlink: devlink
  9610. * @param: configuration parameter to unregister
  9611. */
  9612. void devlink_param_unregister(struct devlink *devlink,
  9613. const struct devlink_param *param)
  9614. {
  9615. struct devlink_param_item *param_item;
  9616. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  9617. param_item =
  9618. devlink_param_find_by_name(&devlink->param_list, param->name);
  9619. WARN_ON(!param_item);
  9620. list_del(&param_item->list);
  9621. kfree(param_item);
  9622. }
  9623. EXPORT_SYMBOL_GPL(devlink_param_unregister);
  9624. /**
  9625. * devlink_param_driverinit_value_get - get configuration parameter
9626. * value for driver initialization
  9627. *
  9628. * @devlink: devlink
  9629. * @param_id: parameter ID
  9630. * @init_val: value of parameter in driverinit configuration mode
  9631. *
  9632. * This function should be used by the driver to get driverinit
9633. * configuration for initialization after a reload command.
  9634. */
  9635. int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
  9636. union devlink_param_value *init_val)
  9637. {
  9638. struct devlink_param_item *param_item;
  9639. if (!devlink_reload_supported(devlink->ops))
  9640. return -EOPNOTSUPP;
  9641. param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
  9642. if (!param_item)
  9643. return -EINVAL;
  9644. if (!param_item->driverinit_value_valid ||
  9645. !devlink_param_cmode_is_supported(param_item->param,
  9646. DEVLINK_PARAM_CMODE_DRIVERINIT))
  9647. return -EOPNOTSUPP;
  9648. if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
  9649. strcpy(init_val->vstr, param_item->driverinit_value.vstr);
  9650. else
  9651. *init_val = param_item->driverinit_value;
  9652. return 0;
  9653. }
  9654. EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
  9655. /**
  9656. * devlink_param_driverinit_value_set - set value of configuration
  9657. * parameter for driverinit
  9658. * configuration mode
  9659. *
  9660. * @devlink: devlink
  9661. * @param_id: parameter ID
  9662. * @init_val: value of parameter to set for driverinit configuration mode
  9663. *
  9664. * This function should be used by the driver to set driverinit
  9665. * configuration mode default value.
  9666. */
  9667. int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
  9668. union devlink_param_value init_val)
  9669. {
  9670. struct devlink_param_item *param_item;
  9671. ASSERT_DEVLINK_NOT_REGISTERED(devlink);
  9672. param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
  9673. if (!param_item)
  9674. return -EINVAL;
  9675. if (!devlink_param_cmode_is_supported(param_item->param,
  9676. DEVLINK_PARAM_CMODE_DRIVERINIT))
  9677. return -EOPNOTSUPP;
  9678. if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
  9679. strcpy(param_item->driverinit_value.vstr, init_val.vstr);
  9680. else
  9681. param_item->driverinit_value = init_val;
  9682. param_item->driverinit_value_valid = true;
  9683. return 0;
  9684. }
  9685. EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
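/*
 * Example (illustrative sketch only): publishing a driver default for a
 * driverinit parameter at probe time and reading the configured value back
 * when reinitializing after "devlink dev reload". The parameter ID is the
 * hypothetical one from the sketch above; note that _value_get() requires
 * the driver to support reload.
 *
 *	union devlink_param_value value;
 *	int err;
 *
 *	// Probe: set the default before devlink_register().
 *	value.vu32 = 8;
 *	devlink_param_driverinit_value_set(devlink,
 *					   FOO_DEVLINK_PARAM_ID_ACL_REGIONS,
 *					   value);
 *
 *	// Reload/init path: read back whatever userspace configured.
 *	err = devlink_param_driverinit_value_get(devlink,
 *						 FOO_DEVLINK_PARAM_ID_ACL_REGIONS,
 *						 &value);
 *	if (!err)
 *		foo->num_acl_regions = value.vu32;
 */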
  9686. /**
  9687. * devlink_param_value_changed - notify devlink on a parameter's value
  9688. * change. Should be called by the driver
  9689. * right after the change.
  9690. *
  9691. * @devlink: devlink
  9692. * @param_id: parameter ID
  9693. *
  9694. * This function should be used by the driver to notify devlink on value
  9695. * change, excluding driverinit configuration mode.
9696. * For driverinit configuration mode the driver should use devlink_param_driverinit_value_set() instead.
  9697. */
  9698. void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
  9699. {
  9700. struct devlink_param_item *param_item;
  9701. param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
  9702. WARN_ON(!param_item);
  9703. devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
  9704. }
  9705. EXPORT_SYMBOL_GPL(devlink_param_value_changed);
  9706. /**
  9707. * devl_region_create - create a new address region
  9708. *
  9709. * @devlink: devlink
  9710. * @ops: region operations and name
  9711. * @region_max_snapshots: Maximum supported number of snapshots for region
  9712. * @region_size: size of region
  9713. */
  9714. struct devlink_region *devl_region_create(struct devlink *devlink,
  9715. const struct devlink_region_ops *ops,
  9716. u32 region_max_snapshots,
  9717. u64 region_size)
  9718. {
  9719. struct devlink_region *region;
  9720. devl_assert_locked(devlink);
  9721. if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
  9722. return ERR_PTR(-EINVAL);
  9723. if (devlink_region_get_by_name(devlink, ops->name))
  9724. return ERR_PTR(-EEXIST);
  9725. region = kzalloc(sizeof(*region), GFP_KERNEL);
  9726. if (!region)
  9727. return ERR_PTR(-ENOMEM);
  9728. region->devlink = devlink;
  9729. region->max_snapshots = region_max_snapshots;
  9730. region->ops = ops;
  9731. region->size = region_size;
  9732. INIT_LIST_HEAD(&region->snapshot_list);
  9733. mutex_init(&region->snapshot_lock);
  9734. list_add_tail(&region->list, &devlink->region_list);
  9735. devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
  9736. return region;
  9737. }
  9738. EXPORT_SYMBOL_GPL(devl_region_create);
  9739. /**
  9740. * devlink_region_create - create a new address region
  9741. *
  9742. * @devlink: devlink
  9743. * @ops: region operations and name
  9744. * @region_max_snapshots: Maximum supported number of snapshots for region
  9745. * @region_size: size of region
  9746. *
9747. * Context: Takes and releases the devlink->lock mutex.
  9748. */
  9749. struct devlink_region *
  9750. devlink_region_create(struct devlink *devlink,
  9751. const struct devlink_region_ops *ops,
  9752. u32 region_max_snapshots, u64 region_size)
  9753. {
  9754. struct devlink_region *region;
  9755. devl_lock(devlink);
  9756. region = devl_region_create(devlink, ops, region_max_snapshots,
  9757. region_size);
  9758. devl_unlock(devlink);
  9759. return region;
  9760. }
  9761. EXPORT_SYMBOL_GPL(devlink_region_create);
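/*
 * Example (illustrative sketch only): creating a region whose snapshot data
 * is kmalloc()ed, so plain kfree() can serve as the mandatory destructor.
 * The region name and the sizes are hypothetical.
 *
 *	static const struct devlink_region_ops foo_fw_region_ops = {
 *		.name = "fw-health",
 *		.destructor = kfree,
 *	};
 *
 *	struct devlink_region *region;
 *
 *	region = devlink_region_create(devlink, &foo_fw_region_ops,
 *				       8, 0x10000);
 *	if (IS_ERR(region))
 *		return PTR_ERR(region);
 */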
  9762. /**
  9763. * devlink_port_region_create - create a new address region for a port
  9764. *
  9765. * @port: devlink port
  9766. * @ops: region operations and name
  9767. * @region_max_snapshots: Maximum supported number of snapshots for region
  9768. * @region_size: size of region
  9769. *
9770. * Context: Takes and releases the devlink->lock mutex.
  9771. */
  9772. struct devlink_region *
  9773. devlink_port_region_create(struct devlink_port *port,
  9774. const struct devlink_port_region_ops *ops,
  9775. u32 region_max_snapshots, u64 region_size)
  9776. {
  9777. struct devlink *devlink = port->devlink;
  9778. struct devlink_region *region;
  9779. int err = 0;
  9780. ASSERT_DEVLINK_PORT_INITIALIZED(port);
  9781. if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
  9782. return ERR_PTR(-EINVAL);
  9783. devl_lock(devlink);
  9784. if (devlink_port_region_get_by_name(port, ops->name)) {
  9785. err = -EEXIST;
  9786. goto unlock;
  9787. }
  9788. region = kzalloc(sizeof(*region), GFP_KERNEL);
  9789. if (!region) {
  9790. err = -ENOMEM;
  9791. goto unlock;
  9792. }
  9793. region->devlink = devlink;
  9794. region->port = port;
  9795. region->max_snapshots = region_max_snapshots;
  9796. region->port_ops = ops;
  9797. region->size = region_size;
  9798. INIT_LIST_HEAD(&region->snapshot_list);
  9799. mutex_init(&region->snapshot_lock);
  9800. list_add_tail(&region->list, &port->region_list);
  9801. devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
  9802. devl_unlock(devlink);
  9803. return region;
  9804. unlock:
  9805. devl_unlock(devlink);
  9806. return ERR_PTR(err);
  9807. }
  9808. EXPORT_SYMBOL_GPL(devlink_port_region_create);
  9809. /**
  9810. * devl_region_destroy - destroy address region
  9811. *
  9812. * @region: devlink region to destroy
  9813. */
  9814. void devl_region_destroy(struct devlink_region *region)
  9815. {
  9816. struct devlink *devlink = region->devlink;
  9817. struct devlink_snapshot *snapshot, *ts;
  9818. devl_assert_locked(devlink);
  9819. /* Free all snapshots of region */
  9820. mutex_lock(&region->snapshot_lock);
  9821. list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
  9822. devlink_region_snapshot_del(region, snapshot);
  9823. mutex_unlock(&region->snapshot_lock);
  9824. list_del(&region->list);
  9825. mutex_destroy(&region->snapshot_lock);
  9826. devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
  9827. kfree(region);
  9828. }
  9829. EXPORT_SYMBOL_GPL(devl_region_destroy);
  9830. /**
  9831. * devlink_region_destroy - destroy address region
  9832. *
  9833. * @region: devlink region to destroy
  9834. *
9835. * Context: Takes and releases the devlink->lock mutex.
  9836. */
  9837. void devlink_region_destroy(struct devlink_region *region)
  9838. {
  9839. struct devlink *devlink = region->devlink;
  9840. devl_lock(devlink);
  9841. devl_region_destroy(region);
  9842. devl_unlock(devlink);
  9843. }
  9844. EXPORT_SYMBOL_GPL(devlink_region_destroy);
  9845. /**
  9846. * devlink_region_snapshot_id_get - get snapshot ID
  9847. *
9848. * This function should be called when adding a new snapshot.
9849. * The driver should use the same id for multiple snapshots taken
  9850. * on multiple regions at the same time/by the same trigger.
  9851. *
  9852. * The caller of this function must use devlink_region_snapshot_id_put
  9853. * when finished creating regions using this id.
  9854. *
  9855. * Returns zero on success, or a negative error code on failure.
  9856. *
  9857. * @devlink: devlink
  9858. * @id: storage to return id
  9859. */
  9860. int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
  9861. {
  9862. return __devlink_region_snapshot_id_get(devlink, id);
  9863. }
  9864. EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
  9865. /**
  9866. * devlink_region_snapshot_id_put - put snapshot ID reference
  9867. *
  9868. * This should be called by a driver after finishing creating snapshots
  9869. * with an id. Doing so ensures that the ID can later be released in the
  9870. * event that all snapshots using it have been destroyed.
  9871. *
  9872. * @devlink: devlink
  9873. * @id: id to release reference on
  9874. */
  9875. void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
  9876. {
  9877. __devlink_snapshot_id_decrement(devlink, id);
  9878. }
  9879. EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
  9880. /**
  9881. * devlink_region_snapshot_create - create a new snapshot
  9882. * This will add a new snapshot of a region. The snapshot
  9883. * will be stored on the region struct and can be accessed
  9884. * from devlink. This is useful for future analyses of snapshots.
  9885. * Multiple snapshots can be created on a region.
  9886. * The @snapshot_id should be obtained using the getter function.
  9887. *
  9888. * @region: devlink region of the snapshot
  9889. * @data: snapshot data
  9890. * @snapshot_id: snapshot id to be created
  9891. */
  9892. int devlink_region_snapshot_create(struct devlink_region *region,
  9893. u8 *data, u32 snapshot_id)
  9894. {
  9895. int err;
  9896. mutex_lock(&region->snapshot_lock);
  9897. err = __devlink_region_snapshot_create(region, data, snapshot_id);
  9898. mutex_unlock(&region->snapshot_lock);
  9899. return err;
  9900. }
  9901. EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
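/*
 * Example (illustrative sketch only): taking a snapshot from the driver side.
 * The id is obtained first, ownership of "data" passes to devlink only when
 * the snapshot is created successfully, and the id reference is always put.
 * The "foo" names and the snapshot length are hypothetical.
 *
 *	static int foo_fw_snapshot_take(struct devlink *devlink,
 *					struct devlink_region *region)
 *	{
 *		u32 snapshot_id;
 *		u8 *data;
 *		int err;
 *
 *		err = devlink_region_snapshot_id_get(devlink, &snapshot_id);
 *		if (err)
 *			return err;
 *
 *		data = kzalloc(0x10000, GFP_KERNEL);
 *		if (!data) {
 *			err = -ENOMEM;
 *			goto out;
 *		}
 *		// ... copy device state into data ...
 *
 *		err = devlink_region_snapshot_create(region, data, snapshot_id);
 *		if (err)
 *			kfree(data);	// ownership passes only on success
 *	out:
 *		devlink_region_snapshot_id_put(devlink, snapshot_id);
 *		return err;
 *	}
 */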
  9902. #define DEVLINK_TRAP(_id, _type) \
  9903. { \
  9904. .type = DEVLINK_TRAP_TYPE_##_type, \
  9905. .id = DEVLINK_TRAP_GENERIC_ID_##_id, \
  9906. .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \
  9907. }
  9908. static const struct devlink_trap devlink_trap_generic[] = {
  9909. DEVLINK_TRAP(SMAC_MC, DROP),
  9910. DEVLINK_TRAP(VLAN_TAG_MISMATCH, DROP),
  9911. DEVLINK_TRAP(INGRESS_VLAN_FILTER, DROP),
  9912. DEVLINK_TRAP(INGRESS_STP_FILTER, DROP),
  9913. DEVLINK_TRAP(EMPTY_TX_LIST, DROP),
  9914. DEVLINK_TRAP(PORT_LOOPBACK_FILTER, DROP),
  9915. DEVLINK_TRAP(BLACKHOLE_ROUTE, DROP),
  9916. DEVLINK_TRAP(TTL_ERROR, EXCEPTION),
  9917. DEVLINK_TRAP(TAIL_DROP, DROP),
  9918. DEVLINK_TRAP(NON_IP_PACKET, DROP),
  9919. DEVLINK_TRAP(UC_DIP_MC_DMAC, DROP),
  9920. DEVLINK_TRAP(DIP_LB, DROP),
  9921. DEVLINK_TRAP(SIP_MC, DROP),
  9922. DEVLINK_TRAP(SIP_LB, DROP),
  9923. DEVLINK_TRAP(CORRUPTED_IP_HDR, DROP),
  9924. DEVLINK_TRAP(IPV4_SIP_BC, DROP),
  9925. DEVLINK_TRAP(IPV6_MC_DIP_RESERVED_SCOPE, DROP),
  9926. DEVLINK_TRAP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, DROP),
  9927. DEVLINK_TRAP(MTU_ERROR, EXCEPTION),
  9928. DEVLINK_TRAP(UNRESOLVED_NEIGH, EXCEPTION),
  9929. DEVLINK_TRAP(RPF, EXCEPTION),
  9930. DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
  9931. DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
  9932. DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
  9933. DEVLINK_TRAP(NON_ROUTABLE, DROP),
  9934. DEVLINK_TRAP(DECAP_ERROR, EXCEPTION),
  9935. DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP),
  9936. DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP),
  9937. DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP),
  9938. DEVLINK_TRAP(STP, CONTROL),
  9939. DEVLINK_TRAP(LACP, CONTROL),
  9940. DEVLINK_TRAP(LLDP, CONTROL),
  9941. DEVLINK_TRAP(IGMP_QUERY, CONTROL),
  9942. DEVLINK_TRAP(IGMP_V1_REPORT, CONTROL),
  9943. DEVLINK_TRAP(IGMP_V2_REPORT, CONTROL),
  9944. DEVLINK_TRAP(IGMP_V3_REPORT, CONTROL),
  9945. DEVLINK_TRAP(IGMP_V2_LEAVE, CONTROL),
  9946. DEVLINK_TRAP(MLD_QUERY, CONTROL),
  9947. DEVLINK_TRAP(MLD_V1_REPORT, CONTROL),
  9948. DEVLINK_TRAP(MLD_V2_REPORT, CONTROL),
  9949. DEVLINK_TRAP(MLD_V1_DONE, CONTROL),
  9950. DEVLINK_TRAP(IPV4_DHCP, CONTROL),
  9951. DEVLINK_TRAP(IPV6_DHCP, CONTROL),
  9952. DEVLINK_TRAP(ARP_REQUEST, CONTROL),
  9953. DEVLINK_TRAP(ARP_RESPONSE, CONTROL),
  9954. DEVLINK_TRAP(ARP_OVERLAY, CONTROL),
  9955. DEVLINK_TRAP(IPV6_NEIGH_SOLICIT, CONTROL),
  9956. DEVLINK_TRAP(IPV6_NEIGH_ADVERT, CONTROL),
  9957. DEVLINK_TRAP(IPV4_BFD, CONTROL),
  9958. DEVLINK_TRAP(IPV6_BFD, CONTROL),
  9959. DEVLINK_TRAP(IPV4_OSPF, CONTROL),
  9960. DEVLINK_TRAP(IPV6_OSPF, CONTROL),
  9961. DEVLINK_TRAP(IPV4_BGP, CONTROL),
  9962. DEVLINK_TRAP(IPV6_BGP, CONTROL),
  9963. DEVLINK_TRAP(IPV4_VRRP, CONTROL),
  9964. DEVLINK_TRAP(IPV6_VRRP, CONTROL),
  9965. DEVLINK_TRAP(IPV4_PIM, CONTROL),
  9966. DEVLINK_TRAP(IPV6_PIM, CONTROL),
  9967. DEVLINK_TRAP(UC_LB, CONTROL),
  9968. DEVLINK_TRAP(LOCAL_ROUTE, CONTROL),
  9969. DEVLINK_TRAP(EXTERNAL_ROUTE, CONTROL),
  9970. DEVLINK_TRAP(IPV6_UC_DIP_LINK_LOCAL_SCOPE, CONTROL),
  9971. DEVLINK_TRAP(IPV6_DIP_ALL_NODES, CONTROL),
  9972. DEVLINK_TRAP(IPV6_DIP_ALL_ROUTERS, CONTROL),
  9973. DEVLINK_TRAP(IPV6_ROUTER_SOLICIT, CONTROL),
  9974. DEVLINK_TRAP(IPV6_ROUTER_ADVERT, CONTROL),
  9975. DEVLINK_TRAP(IPV6_REDIRECT, CONTROL),
  9976. DEVLINK_TRAP(IPV4_ROUTER_ALERT, CONTROL),
  9977. DEVLINK_TRAP(IPV6_ROUTER_ALERT, CONTROL),
  9978. DEVLINK_TRAP(PTP_EVENT, CONTROL),
  9979. DEVLINK_TRAP(PTP_GENERAL, CONTROL),
  9980. DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL),
  9981. DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL),
  9982. DEVLINK_TRAP(EARLY_DROP, DROP),
  9983. DEVLINK_TRAP(VXLAN_PARSING, DROP),
  9984. DEVLINK_TRAP(LLC_SNAP_PARSING, DROP),
  9985. DEVLINK_TRAP(VLAN_PARSING, DROP),
  9986. DEVLINK_TRAP(PPPOE_PPP_PARSING, DROP),
  9987. DEVLINK_TRAP(MPLS_PARSING, DROP),
  9988. DEVLINK_TRAP(ARP_PARSING, DROP),
  9989. DEVLINK_TRAP(IP_1_PARSING, DROP),
  9990. DEVLINK_TRAP(IP_N_PARSING, DROP),
  9991. DEVLINK_TRAP(GRE_PARSING, DROP),
  9992. DEVLINK_TRAP(UDP_PARSING, DROP),
  9993. DEVLINK_TRAP(TCP_PARSING, DROP),
  9994. DEVLINK_TRAP(IPSEC_PARSING, DROP),
  9995. DEVLINK_TRAP(SCTP_PARSING, DROP),
  9996. DEVLINK_TRAP(DCCP_PARSING, DROP),
  9997. DEVLINK_TRAP(GTP_PARSING, DROP),
  9998. DEVLINK_TRAP(ESP_PARSING, DROP),
  9999. DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
  10000. DEVLINK_TRAP(DMAC_FILTER, DROP),
  10001. };
  10002. #define DEVLINK_TRAP_GROUP(_id) \
  10003. { \
  10004. .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \
  10005. .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \
  10006. }
  10007. static const struct devlink_trap_group devlink_trap_group_generic[] = {
  10008. DEVLINK_TRAP_GROUP(L2_DROPS),
  10009. DEVLINK_TRAP_GROUP(L3_DROPS),
  10010. DEVLINK_TRAP_GROUP(L3_EXCEPTIONS),
  10011. DEVLINK_TRAP_GROUP(BUFFER_DROPS),
  10012. DEVLINK_TRAP_GROUP(TUNNEL_DROPS),
  10013. DEVLINK_TRAP_GROUP(ACL_DROPS),
  10014. DEVLINK_TRAP_GROUP(STP),
  10015. DEVLINK_TRAP_GROUP(LACP),
  10016. DEVLINK_TRAP_GROUP(LLDP),
  10017. DEVLINK_TRAP_GROUP(MC_SNOOPING),
  10018. DEVLINK_TRAP_GROUP(DHCP),
  10019. DEVLINK_TRAP_GROUP(NEIGH_DISCOVERY),
  10020. DEVLINK_TRAP_GROUP(BFD),
  10021. DEVLINK_TRAP_GROUP(OSPF),
  10022. DEVLINK_TRAP_GROUP(BGP),
  10023. DEVLINK_TRAP_GROUP(VRRP),
  10024. DEVLINK_TRAP_GROUP(PIM),
  10025. DEVLINK_TRAP_GROUP(UC_LB),
  10026. DEVLINK_TRAP_GROUP(LOCAL_DELIVERY),
  10027. DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY),
  10028. DEVLINK_TRAP_GROUP(IPV6),
  10029. DEVLINK_TRAP_GROUP(PTP_EVENT),
  10030. DEVLINK_TRAP_GROUP(PTP_GENERAL),
  10031. DEVLINK_TRAP_GROUP(ACL_SAMPLE),
  10032. DEVLINK_TRAP_GROUP(ACL_TRAP),
  10033. DEVLINK_TRAP_GROUP(PARSER_ERROR_DROPS),
  10034. };
  10035. static int devlink_trap_generic_verify(const struct devlink_trap *trap)
  10036. {
  10037. if (trap->id > DEVLINK_TRAP_GENERIC_ID_MAX)
  10038. return -EINVAL;
  10039. if (strcmp(trap->name, devlink_trap_generic[trap->id].name))
  10040. return -EINVAL;
  10041. if (trap->type != devlink_trap_generic[trap->id].type)
  10042. return -EINVAL;
  10043. return 0;
  10044. }
  10045. static int devlink_trap_driver_verify(const struct devlink_trap *trap)
  10046. {
  10047. int i;
  10048. if (trap->id <= DEVLINK_TRAP_GENERIC_ID_MAX)
  10049. return -EINVAL;
  10050. for (i = 0; i < ARRAY_SIZE(devlink_trap_generic); i++) {
  10051. if (!strcmp(trap->name, devlink_trap_generic[i].name))
  10052. return -EEXIST;
  10053. }
  10054. return 0;
  10055. }
  10056. static int devlink_trap_verify(const struct devlink_trap *trap)
  10057. {
  10058. if (!trap || !trap->name)
  10059. return -EINVAL;
  10060. if (trap->generic)
  10061. return devlink_trap_generic_verify(trap);
  10062. else
  10063. return devlink_trap_driver_verify(trap);
  10064. }
  10065. static int
  10066. devlink_trap_group_generic_verify(const struct devlink_trap_group *group)
  10067. {
  10068. if (group->id > DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
  10069. return -EINVAL;
  10070. if (strcmp(group->name, devlink_trap_group_generic[group->id].name))
  10071. return -EINVAL;
  10072. return 0;
  10073. }
  10074. static int
  10075. devlink_trap_group_driver_verify(const struct devlink_trap_group *group)
  10076. {
  10077. int i;
  10078. if (group->id <= DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
  10079. return -EINVAL;
  10080. for (i = 0; i < ARRAY_SIZE(devlink_trap_group_generic); i++) {
  10081. if (!strcmp(group->name, devlink_trap_group_generic[i].name))
  10082. return -EEXIST;
  10083. }
  10084. return 0;
  10085. }
  10086. static int devlink_trap_group_verify(const struct devlink_trap_group *group)
  10087. {
  10088. if (group->generic)
  10089. return devlink_trap_group_generic_verify(group);
  10090. else
  10091. return devlink_trap_group_driver_verify(group);
  10092. }
  10093. static void
  10094. devlink_trap_group_notify(struct devlink *devlink,
  10095. const struct devlink_trap_group_item *group_item,
  10096. enum devlink_command cmd)
  10097. {
  10098. struct sk_buff *msg;
  10099. int err;
  10100. WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW &&
  10101. cmd != DEVLINK_CMD_TRAP_GROUP_DEL);
  10102. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  10103. return;
  10104. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  10105. if (!msg)
  10106. return;
  10107. err = devlink_nl_trap_group_fill(msg, devlink, group_item, cmd, 0, 0,
  10108. 0);
  10109. if (err) {
  10110. nlmsg_free(msg);
  10111. return;
  10112. }
  10113. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
  10114. msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  10115. }
  10116. static int
  10117. devlink_trap_item_group_link(struct devlink *devlink,
  10118. struct devlink_trap_item *trap_item)
  10119. {
  10120. u16 group_id = trap_item->trap->init_group_id;
  10121. struct devlink_trap_group_item *group_item;
  10122. group_item = devlink_trap_group_item_lookup_by_id(devlink, group_id);
  10123. if (WARN_ON_ONCE(!group_item))
  10124. return -EINVAL;
  10125. trap_item->group_item = group_item;
  10126. return 0;
  10127. }
  10128. static void devlink_trap_notify(struct devlink *devlink,
  10129. const struct devlink_trap_item *trap_item,
  10130. enum devlink_command cmd)
  10131. {
  10132. struct sk_buff *msg;
  10133. int err;
  10134. WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW &&
  10135. cmd != DEVLINK_CMD_TRAP_DEL);
  10136. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  10137. return;
  10138. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  10139. if (!msg)
  10140. return;
  10141. err = devlink_nl_trap_fill(msg, devlink, trap_item, cmd, 0, 0, 0);
  10142. if (err) {
  10143. nlmsg_free(msg);
  10144. return;
  10145. }
  10146. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
  10147. msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  10148. }
  10149. static int
  10150. devlink_trap_register(struct devlink *devlink,
  10151. const struct devlink_trap *trap, void *priv)
  10152. {
  10153. struct devlink_trap_item *trap_item;
  10154. int err;
  10155. if (devlink_trap_item_lookup(devlink, trap->name))
  10156. return -EEXIST;
  10157. trap_item = kzalloc(sizeof(*trap_item), GFP_KERNEL);
  10158. if (!trap_item)
  10159. return -ENOMEM;
  10160. trap_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
  10161. if (!trap_item->stats) {
  10162. err = -ENOMEM;
  10163. goto err_stats_alloc;
  10164. }
  10165. trap_item->trap = trap;
  10166. trap_item->action = trap->init_action;
  10167. trap_item->priv = priv;
  10168. err = devlink_trap_item_group_link(devlink, trap_item);
  10169. if (err)
  10170. goto err_group_link;
  10171. err = devlink->ops->trap_init(devlink, trap, trap_item);
  10172. if (err)
  10173. goto err_trap_init;
  10174. list_add_tail(&trap_item->list, &devlink->trap_list);
  10175. devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
  10176. return 0;
  10177. err_trap_init:
  10178. err_group_link:
  10179. free_percpu(trap_item->stats);
  10180. err_stats_alloc:
  10181. kfree(trap_item);
  10182. return err;
  10183. }
  10184. static void devlink_trap_unregister(struct devlink *devlink,
  10185. const struct devlink_trap *trap)
  10186. {
  10187. struct devlink_trap_item *trap_item;
  10188. trap_item = devlink_trap_item_lookup(devlink, trap->name);
  10189. if (WARN_ON_ONCE(!trap_item))
  10190. return;
  10191. devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
  10192. list_del(&trap_item->list);
  10193. if (devlink->ops->trap_fini)
  10194. devlink->ops->trap_fini(devlink, trap, trap_item);
  10195. free_percpu(trap_item->stats);
  10196. kfree(trap_item);
  10197. }
  10198. static void devlink_trap_disable(struct devlink *devlink,
  10199. const struct devlink_trap *trap)
  10200. {
  10201. struct devlink_trap_item *trap_item;
  10202. trap_item = devlink_trap_item_lookup(devlink, trap->name);
  10203. if (WARN_ON_ONCE(!trap_item))
  10204. return;
  10205. devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP,
  10206. NULL);
  10207. trap_item->action = DEVLINK_TRAP_ACTION_DROP;
  10208. }
  10209. /**
  10210. * devl_traps_register - Register packet traps with devlink.
  10211. * @devlink: devlink.
  10212. * @traps: Packet traps.
  10213. * @traps_count: Count of provided packet traps.
  10214. * @priv: Driver private information.
  10215. *
  10216. * Return: Non-zero value on failure.
  10217. */
  10218. int devl_traps_register(struct devlink *devlink,
  10219. const struct devlink_trap *traps,
  10220. size_t traps_count, void *priv)
  10221. {
  10222. int i, err;
  10223. if (!devlink->ops->trap_init || !devlink->ops->trap_action_set)
  10224. return -EINVAL;
  10225. devl_assert_locked(devlink);
  10226. for (i = 0; i < traps_count; i++) {
  10227. const struct devlink_trap *trap = &traps[i];
  10228. err = devlink_trap_verify(trap);
  10229. if (err)
  10230. goto err_trap_verify;
  10231. err = devlink_trap_register(devlink, trap, priv);
  10232. if (err)
  10233. goto err_trap_register;
  10234. }
  10235. return 0;
  10236. err_trap_register:
  10237. err_trap_verify:
  10238. for (i--; i >= 0; i--)
  10239. devlink_trap_unregister(devlink, &traps[i]);
  10240. return err;
  10241. }
  10242. EXPORT_SYMBOL_GPL(devl_traps_register);
  10243. /**
  10244. * devlink_traps_register - Register packet traps with devlink.
  10245. * @devlink: devlink.
  10246. * @traps: Packet traps.
  10247. * @traps_count: Count of provided packet traps.
  10248. * @priv: Driver private information.
  10249. *
10250. * Context: Takes and releases the devlink->lock mutex.
  10251. *
  10252. * Return: Non-zero value on failure.
  10253. */
  10254. int devlink_traps_register(struct devlink *devlink,
  10255. const struct devlink_trap *traps,
  10256. size_t traps_count, void *priv)
  10257. {
  10258. int err;
  10259. devl_lock(devlink);
  10260. err = devl_traps_register(devlink, traps, traps_count, priv);
  10261. devl_unlock(devlink);
  10262. return err;
  10263. }
  10264. EXPORT_SYMBOL_GPL(devlink_traps_register);
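/*
 * Example (illustrative sketch only): registering generic traps. The group a
 * trap points to via its init_group_id must already be registered, so groups
 * go first. The driver must implement the trap_init() and trap_action_set()
 * ops (checked above), and "foo" is a hypothetical driver private pointer.
 *
 *	static const struct devlink_trap_group foo_trap_groups[] = {
 *		DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
 *	};
 *
 *	static const struct devlink_trap foo_traps[] = {
 *		DEVLINK_TRAP_GENERIC(DROP, DROP, SMAC_MC,
 *				     DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS,
 *				     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT),
 *	};
 *
 *	err = devlink_trap_groups_register(devlink, foo_trap_groups,
 *					   ARRAY_SIZE(foo_trap_groups));
 *	if (err)
 *		return err;
 *	err = devlink_traps_register(devlink, foo_traps,
 *				     ARRAY_SIZE(foo_traps), foo);
 */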
  10265. /**
  10266. * devl_traps_unregister - Unregister packet traps from devlink.
  10267. * @devlink: devlink.
  10268. * @traps: Packet traps.
  10269. * @traps_count: Count of provided packet traps.
  10270. */
  10271. void devl_traps_unregister(struct devlink *devlink,
  10272. const struct devlink_trap *traps,
  10273. size_t traps_count)
  10274. {
  10275. int i;
  10276. devl_assert_locked(devlink);
  10277. /* Make sure we do not have any packets in-flight while unregistering
  10278. * traps by disabling all of them and waiting for a grace period.
  10279. */
  10280. for (i = traps_count - 1; i >= 0; i--)
  10281. devlink_trap_disable(devlink, &traps[i]);
  10282. synchronize_rcu();
  10283. for (i = traps_count - 1; i >= 0; i--)
  10284. devlink_trap_unregister(devlink, &traps[i]);
  10285. }
  10286. EXPORT_SYMBOL_GPL(devl_traps_unregister);
  10287. /**
  10288. * devlink_traps_unregister - Unregister packet traps from devlink.
  10289. * @devlink: devlink.
  10290. * @traps: Packet traps.
  10291. * @traps_count: Count of provided packet traps.
  10292. *
10293. * Context: Takes and releases the devlink->lock mutex.
  10294. */
  10295. void devlink_traps_unregister(struct devlink *devlink,
  10296. const struct devlink_trap *traps,
  10297. size_t traps_count)
  10298. {
  10299. devl_lock(devlink);
  10300. devl_traps_unregister(devlink, traps, traps_count);
  10301. devl_unlock(devlink);
  10302. }
  10303. EXPORT_SYMBOL_GPL(devlink_traps_unregister);
  10304. static void
  10305. devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
  10306. size_t skb_len)
  10307. {
  10308. struct devlink_stats *stats;
  10309. stats = this_cpu_ptr(trap_stats);
  10310. u64_stats_update_begin(&stats->syncp);
  10311. u64_stats_add(&stats->rx_bytes, skb_len);
  10312. u64_stats_inc(&stats->rx_packets);
  10313. u64_stats_update_end(&stats->syncp);
  10314. }
  10315. static void
  10316. devlink_trap_report_metadata_set(struct devlink_trap_metadata *metadata,
  10317. const struct devlink_trap_item *trap_item,
  10318. struct devlink_port *in_devlink_port,
  10319. const struct flow_action_cookie *fa_cookie)
  10320. {
  10321. metadata->trap_name = trap_item->trap->name;
  10322. metadata->trap_group_name = trap_item->group_item->group->name;
  10323. metadata->fa_cookie = fa_cookie;
  10324. metadata->trap_type = trap_item->trap->type;
  10325. spin_lock(&in_devlink_port->type_lock);
  10326. if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
  10327. metadata->input_dev = in_devlink_port->type_dev;
  10328. spin_unlock(&in_devlink_port->type_lock);
  10329. }
  10330. /**
  10331. * devlink_trap_report - Report trapped packet to drop monitor.
  10332. * @devlink: devlink.
  10333. * @skb: Trapped packet.
  10334. * @trap_ctx: Trap context.
  10335. * @in_devlink_port: Input devlink port.
  10336. * @fa_cookie: Flow action cookie. Could be NULL.
  10337. */
  10338. void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
  10339. void *trap_ctx, struct devlink_port *in_devlink_port,
  10340. const struct flow_action_cookie *fa_cookie)
  10341. {
  10342. struct devlink_trap_item *trap_item = trap_ctx;
  10343. devlink_trap_stats_update(trap_item->stats, skb->len);
  10344. devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
  10345. if (trace_devlink_trap_report_enabled()) {
  10346. struct devlink_trap_metadata metadata = {};
  10347. devlink_trap_report_metadata_set(&metadata, trap_item,
  10348. in_devlink_port, fa_cookie);
  10349. trace_devlink_trap_report(devlink, skb, &metadata);
  10350. }
  10351. }
  10352. EXPORT_SYMBOL_GPL(devlink_trap_report);
  10353. /**
  10354. * devlink_trap_ctx_priv - Trap context to driver private information.
  10355. * @trap_ctx: Trap context.
  10356. *
  10357. * Return: Driver private information passed during registration.
  10358. */
  10359. void *devlink_trap_ctx_priv(void *trap_ctx)
  10360. {
  10361. struct devlink_trap_item *trap_item = trap_ctx;
  10362. return trap_item->priv;
  10363. }
  10364. EXPORT_SYMBOL_GPL(devlink_trap_ctx_priv);
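/*
 * Example (illustrative sketch only): a driver typically remembers trap_ctx
 * in its ->trap_init() op and hands it back from the RX path. Here
 * devlink_trap_ctx_priv() recovers the priv pointer that was passed to
 * devl_traps_register(); "struct foo" and its fields are hypothetical.
 *
 *	static int foo_trap_init(struct devlink *devlink,
 *				 const struct devlink_trap *trap,
 *				 void *trap_ctx)
 *	{
 *		struct foo *foo = devlink_trap_ctx_priv(trap_ctx);
 *
 *		foo->smac_mc_trap_ctx = trap_ctx;
 *		// ... program the matching hardware trap ...
 *		return 0;
 *	}
 *
 *	static void foo_handle_trapped_skb(struct foo *foo,
 *					   struct sk_buff *skb,
 *					   struct devlink_port *in_port)
 *	{
 *		devlink_trap_report(foo->devlink, skb, foo->smac_mc_trap_ctx,
 *				    in_port, NULL);
 *		// skb ownership stays with the driver; free or re-inject it.
 *	}
 */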
  10365. static int
  10366. devlink_trap_group_item_policer_link(struct devlink *devlink,
  10367. struct devlink_trap_group_item *group_item)
  10368. {
  10369. u32 policer_id = group_item->group->init_policer_id;
  10370. struct devlink_trap_policer_item *policer_item;
  10371. if (policer_id == 0)
  10372. return 0;
  10373. policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
  10374. if (WARN_ON_ONCE(!policer_item))
  10375. return -EINVAL;
  10376. group_item->policer_item = policer_item;
  10377. return 0;
  10378. }
  10379. static int
  10380. devlink_trap_group_register(struct devlink *devlink,
  10381. const struct devlink_trap_group *group)
  10382. {
  10383. struct devlink_trap_group_item *group_item;
  10384. int err;
  10385. if (devlink_trap_group_item_lookup(devlink, group->name))
  10386. return -EEXIST;
  10387. group_item = kzalloc(sizeof(*group_item), GFP_KERNEL);
  10388. if (!group_item)
  10389. return -ENOMEM;
  10390. group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
  10391. if (!group_item->stats) {
  10392. err = -ENOMEM;
  10393. goto err_stats_alloc;
  10394. }
  10395. group_item->group = group;
  10396. err = devlink_trap_group_item_policer_link(devlink, group_item);
  10397. if (err)
  10398. goto err_policer_link;
  10399. if (devlink->ops->trap_group_init) {
  10400. err = devlink->ops->trap_group_init(devlink, group);
  10401. if (err)
  10402. goto err_group_init;
  10403. }
  10404. list_add_tail(&group_item->list, &devlink->trap_group_list);
  10405. devlink_trap_group_notify(devlink, group_item,
  10406. DEVLINK_CMD_TRAP_GROUP_NEW);
  10407. return 0;
  10408. err_group_init:
  10409. err_policer_link:
  10410. free_percpu(group_item->stats);
  10411. err_stats_alloc:
  10412. kfree(group_item);
  10413. return err;
  10414. }
  10415. static void
  10416. devlink_trap_group_unregister(struct devlink *devlink,
  10417. const struct devlink_trap_group *group)
  10418. {
  10419. struct devlink_trap_group_item *group_item;
  10420. group_item = devlink_trap_group_item_lookup(devlink, group->name);
  10421. if (WARN_ON_ONCE(!group_item))
  10422. return;
  10423. devlink_trap_group_notify(devlink, group_item,
  10424. DEVLINK_CMD_TRAP_GROUP_DEL);
  10425. list_del(&group_item->list);
  10426. free_percpu(group_item->stats);
  10427. kfree(group_item);
  10428. }
  10429. /**
  10430. * devl_trap_groups_register - Register packet trap groups with devlink.
  10431. * @devlink: devlink.
  10432. * @groups: Packet trap groups.
  10433. * @groups_count: Count of provided packet trap groups.
  10434. *
  10435. * Return: Non-zero value on failure.
  10436. */
  10437. int devl_trap_groups_register(struct devlink *devlink,
  10438. const struct devlink_trap_group *groups,
  10439. size_t groups_count)
  10440. {
  10441. int i, err;
  10442. devl_assert_locked(devlink);
  10443. for (i = 0; i < groups_count; i++) {
  10444. const struct devlink_trap_group *group = &groups[i];
  10445. err = devlink_trap_group_verify(group);
  10446. if (err)
  10447. goto err_trap_group_verify;
  10448. err = devlink_trap_group_register(devlink, group);
  10449. if (err)
  10450. goto err_trap_group_register;
  10451. }
  10452. return 0;
  10453. err_trap_group_register:
  10454. err_trap_group_verify:
  10455. for (i--; i >= 0; i--)
  10456. devlink_trap_group_unregister(devlink, &groups[i]);
  10457. return err;
  10458. }
  10459. EXPORT_SYMBOL_GPL(devl_trap_groups_register);
  10460. /**
  10461. * devlink_trap_groups_register - Register packet trap groups with devlink.
  10462. * @devlink: devlink.
  10463. * @groups: Packet trap groups.
  10464. * @groups_count: Count of provided packet trap groups.
  10465. *
10466. * Context: Takes and releases the devlink->lock mutex.
  10467. *
  10468. * Return: Non-zero value on failure.
  10469. */
  10470. int devlink_trap_groups_register(struct devlink *devlink,
  10471. const struct devlink_trap_group *groups,
  10472. size_t groups_count)
  10473. {
  10474. int err;
  10475. devl_lock(devlink);
  10476. err = devl_trap_groups_register(devlink, groups, groups_count);
  10477. devl_unlock(devlink);
  10478. return err;
  10479. }
  10480. EXPORT_SYMBOL_GPL(devlink_trap_groups_register);
  10481. /**
  10482. * devl_trap_groups_unregister - Unregister packet trap groups from devlink.
  10483. * @devlink: devlink.
  10484. * @groups: Packet trap groups.
  10485. * @groups_count: Count of provided packet trap groups.
  10486. */
  10487. void devl_trap_groups_unregister(struct devlink *devlink,
  10488. const struct devlink_trap_group *groups,
  10489. size_t groups_count)
  10490. {
  10491. int i;
  10492. devl_assert_locked(devlink);
  10493. for (i = groups_count - 1; i >= 0; i--)
  10494. devlink_trap_group_unregister(devlink, &groups[i]);
  10495. }
  10496. EXPORT_SYMBOL_GPL(devl_trap_groups_unregister);
  10497. /**
  10498. * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
  10499. * @devlink: devlink.
  10500. * @groups: Packet trap groups.
  10501. * @groups_count: Count of provided packet trap groups.
  10502. *
10503. * Context: Takes and releases the devlink->lock mutex.
  10504. */
  10505. void devlink_trap_groups_unregister(struct devlink *devlink,
  10506. const struct devlink_trap_group *groups,
  10507. size_t groups_count)
  10508. {
  10509. devl_lock(devlink);
  10510. devl_trap_groups_unregister(devlink, groups, groups_count);
  10511. devl_unlock(devlink);
  10512. }
  10513. EXPORT_SYMBOL_GPL(devlink_trap_groups_unregister);
  10514. static void
  10515. devlink_trap_policer_notify(struct devlink *devlink,
  10516. const struct devlink_trap_policer_item *policer_item,
  10517. enum devlink_command cmd)
  10518. {
  10519. struct sk_buff *msg;
  10520. int err;
  10521. WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW &&
  10522. cmd != DEVLINK_CMD_TRAP_POLICER_DEL);
  10523. if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
  10524. return;
  10525. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  10526. if (!msg)
  10527. return;
  10528. err = devlink_nl_trap_policer_fill(msg, devlink, policer_item, cmd, 0,
  10529. 0, 0);
  10530. if (err) {
  10531. nlmsg_free(msg);
  10532. return;
  10533. }
  10534. genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
  10535. msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
  10536. }
  10537. static int
  10538. devlink_trap_policer_register(struct devlink *devlink,
  10539. const struct devlink_trap_policer *policer)
  10540. {
  10541. struct devlink_trap_policer_item *policer_item;
  10542. int err;
  10543. if (devlink_trap_policer_item_lookup(devlink, policer->id))
  10544. return -EEXIST;
  10545. policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
  10546. if (!policer_item)
  10547. return -ENOMEM;
  10548. policer_item->policer = policer;
  10549. policer_item->rate = policer->init_rate;
  10550. policer_item->burst = policer->init_burst;
  10551. if (devlink->ops->trap_policer_init) {
  10552. err = devlink->ops->trap_policer_init(devlink, policer);
  10553. if (err)
  10554. goto err_policer_init;
  10555. }
  10556. list_add_tail(&policer_item->list, &devlink->trap_policer_list);
  10557. devlink_trap_policer_notify(devlink, policer_item,
  10558. DEVLINK_CMD_TRAP_POLICER_NEW);
  10559. return 0;
  10560. err_policer_init:
  10561. kfree(policer_item);
  10562. return err;
  10563. }
  10564. static void
  10565. devlink_trap_policer_unregister(struct devlink *devlink,
  10566. const struct devlink_trap_policer *policer)
  10567. {
  10568. struct devlink_trap_policer_item *policer_item;
  10569. policer_item = devlink_trap_policer_item_lookup(devlink, policer->id);
  10570. if (WARN_ON_ONCE(!policer_item))
  10571. return;
  10572. devlink_trap_policer_notify(devlink, policer_item,
  10573. DEVLINK_CMD_TRAP_POLICER_DEL);
  10574. list_del(&policer_item->list);
  10575. if (devlink->ops->trap_policer_fini)
  10576. devlink->ops->trap_policer_fini(devlink, policer);
  10577. kfree(policer_item);
  10578. }
  10579. /**
  10580. * devl_trap_policers_register - Register packet trap policers with devlink.
  10581. * @devlink: devlink.
  10582. * @policers: Packet trap policers.
  10583. * @policers_count: Count of provided packet trap policers.
  10584. *
  10585. * Return: Non-zero value on failure.
  10586. */
  10587. int
  10588. devl_trap_policers_register(struct devlink *devlink,
  10589. const struct devlink_trap_policer *policers,
  10590. size_t policers_count)
  10591. {
  10592. int i, err;
  10593. devl_assert_locked(devlink);
  10594. for (i = 0; i < policers_count; i++) {
  10595. const struct devlink_trap_policer *policer = &policers[i];
  10596. if (WARN_ON(policer->id == 0 ||
  10597. policer->max_rate < policer->min_rate ||
  10598. policer->max_burst < policer->min_burst)) {
  10599. err = -EINVAL;
  10600. goto err_trap_policer_verify;
  10601. }
  10602. err = devlink_trap_policer_register(devlink, policer);
  10603. if (err)
  10604. goto err_trap_policer_register;
  10605. }
  10606. return 0;
  10607. err_trap_policer_register:
  10608. err_trap_policer_verify:
  10609. for (i--; i >= 0; i--)
  10610. devlink_trap_policer_unregister(devlink, &policers[i]);
  10611. return err;
  10612. }
  10613. EXPORT_SYMBOL_GPL(devl_trap_policers_register);
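/*
 * Example (illustrative sketch only): registering one policer. A trap group
 * is bound to it by passing the policer id as init_policer_id, e.g.
 * DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1). The rates, bursts and ranges
 * below are hypothetical.
 *
 *	static const struct devlink_trap_policer foo_trap_policers[] = {
 *		// id 1: 1000 pps, burst 128, rate range [1, 1000000],
 *		// burst range [1, 4096].
 *		DEVLINK_TRAP_POLICER(1, 1000, 128, 1000000, 1, 4096, 1),
 *	};
 *
 *	devl_lock(devlink);
 *	err = devl_trap_policers_register(devlink, foo_trap_policers,
 *					  ARRAY_SIZE(foo_trap_policers));
 *	devl_unlock(devlink);
 */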
  10614. /**
  10615. * devl_trap_policers_unregister - Unregister packet trap policers from devlink.
  10616. * @devlink: devlink.
  10617. * @policers: Packet trap policers.
  10618. * @policers_count: Count of provided packet trap policers.
  10619. */
  10620. void
  10621. devl_trap_policers_unregister(struct devlink *devlink,
  10622. const struct devlink_trap_policer *policers,
  10623. size_t policers_count)
  10624. {
  10625. int i;
  10626. devl_assert_locked(devlink);
  10627. for (i = policers_count - 1; i >= 0; i--)
  10628. devlink_trap_policer_unregister(devlink, &policers[i]);
  10629. }
  10630. EXPORT_SYMBOL_GPL(devl_trap_policers_unregister);
  10631. static void __devlink_compat_running_version(struct devlink *devlink,
  10632. char *buf, size_t len)
  10633. {
  10634. struct devlink_info_req req = {};
  10635. const struct nlattr *nlattr;
  10636. struct sk_buff *msg;
  10637. int rem, err;
  10638. msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  10639. if (!msg)
  10640. return;
  10641. req.msg = msg;
  10642. err = devlink->ops->info_get(devlink, &req, NULL);
  10643. if (err)
  10644. goto free_msg;
  10645. nla_for_each_attr(nlattr, (void *)msg->data, msg->len, rem) {
  10646. const struct nlattr *kv;
  10647. int rem_kv;
  10648. if (nla_type(nlattr) != DEVLINK_ATTR_INFO_VERSION_RUNNING)
  10649. continue;
  10650. nla_for_each_nested(kv, nlattr, rem_kv) {
  10651. if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
  10652. continue;
  10653. strlcat(buf, nla_data(kv), len);
  10654. strlcat(buf, " ", len);
  10655. }
  10656. }
  10657. free_msg:
  10658. nlmsg_free(msg);
  10659. }
  10660. static struct devlink_port *netdev_to_devlink_port(struct net_device *dev)
  10661. {
  10662. if (!dev->netdev_ops->ndo_get_devlink_port)
  10663. return NULL;
  10664. return dev->netdev_ops->ndo_get_devlink_port(dev);
  10665. }
  10666. void devlink_compat_running_version(struct devlink *devlink,
  10667. char *buf, size_t len)
  10668. {
  10669. if (!devlink->ops->info_get)
  10670. return;
  10671. devl_lock(devlink);
  10672. __devlink_compat_running_version(devlink, buf, len);
  10673. devl_unlock(devlink);
  10674. }
  10675. int devlink_compat_flash_update(struct devlink *devlink, const char *file_name)
  10676. {
  10677. struct devlink_flash_update_params params = {};
  10678. int ret;
  10679. if (!devlink->ops->flash_update)
  10680. return -EOPNOTSUPP;
  10681. ret = request_firmware(&params.fw, file_name, devlink->dev);
  10682. if (ret)
  10683. return ret;
  10684. devl_lock(devlink);
  10685. devlink_flash_update_begin_notify(devlink);
  10686. ret = devlink->ops->flash_update(devlink, &params, NULL);
  10687. devlink_flash_update_end_notify(devlink);
  10688. devl_unlock(devlink);
  10689. release_firmware(params.fw);
  10690. return ret;
  10691. }
  10692. int devlink_compat_phys_port_name_get(struct net_device *dev,
  10693. char *name, size_t len)
  10694. {
  10695. struct devlink_port *devlink_port;
  10696. /* RTNL mutex is held here which ensures that devlink_port
  10697. * instance cannot disappear in the middle. No need to take
  10698. * any devlink lock as only permanent values are accessed.
  10699. */
  10700. ASSERT_RTNL();
  10701. devlink_port = netdev_to_devlink_port(dev);
  10702. if (!devlink_port)
  10703. return -EOPNOTSUPP;
  10704. return __devlink_port_phys_port_name_get(devlink_port, name, len);
  10705. }
  10706. int devlink_compat_switch_id_get(struct net_device *dev,
  10707. struct netdev_phys_item_id *ppid)
  10708. {
  10709. struct devlink_port *devlink_port;
  10710. /* Caller must hold RTNL mutex or reference to dev, which ensures that
  10711. * devlink_port instance cannot disappear in the middle. No need to take
  10712. * any devlink lock as only permanent values are accessed.
  10713. */
  10714. devlink_port = netdev_to_devlink_port(dev);
  10715. if (!devlink_port || !devlink_port->switch_port)
  10716. return -EOPNOTSUPP;
  10717. memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
  10718. return 0;
  10719. }
  10720. static void __net_exit devlink_pernet_pre_exit(struct net *net)
  10721. {
  10722. struct devlink *devlink;
  10723. u32 actions_performed;
  10724. unsigned long index;
  10725. int err;
  10726. /* In case network namespace is getting destroyed, reload
  10727. * all devlink instances from this namespace into init_net.
  10728. */
  10729. devlinks_xa_for_each_registered_get(net, index, devlink) {
  10730. WARN_ON(!(devlink->features & DEVLINK_F_RELOAD));
  10731. mutex_lock(&devlink->lock);
  10732. err = devlink_reload(devlink, &init_net,
  10733. DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
  10734. DEVLINK_RELOAD_LIMIT_UNSPEC,
  10735. &actions_performed, NULL);
  10736. mutex_unlock(&devlink->lock);
  10737. if (err && err != -EOPNOTSUPP)
  10738. pr_warn("Failed to reload devlink instance into init_net\n");
  10739. devlink_put(devlink);
  10740. }
  10741. }
  10742. static struct pernet_operations devlink_pernet_ops __net_initdata = {
  10743. .pre_exit = devlink_pernet_pre_exit,
  10744. };
  10745. static int __init devlink_init(void)
  10746. {
  10747. int err;
  10748. err = genl_register_family(&devlink_nl_family);
  10749. if (err)
  10750. goto out;
  10751. err = register_pernet_subsys(&devlink_pernet_ops);
  10752. out:
  10753. WARN_ON(err);
  10754. return err;
  10755. }
  10756. subsys_initcall(devlink_init);