  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * drivers/net/ethernet/micrel/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver
  4. *
  5. * Copyright (c) 2009-2010 Micrel, Inc.
  6. * Tristram Ha <[email protected]>
  7. */
  8. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9. #include <linux/init.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/ioport.h>
  14. #include <linux/pci.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/mii.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/ethtool.h>
  19. #include <linux/etherdevice.h>
  20. #include <linux/in.h>
  21. #include <linux/ip.h>
  22. #include <linux/if_vlan.h>
  23. #include <linux/crc32.h>
  24. #include <linux/sched.h>
  25. #include <linux/slab.h>
  26. #include <linux/micrel_phy.h>
  27. /* DMA Registers */
  28. #define KS_DMA_TX_CTRL 0x0000
  29. #define DMA_TX_ENABLE 0x00000001
  30. #define DMA_TX_CRC_ENABLE 0x00000002
  31. #define DMA_TX_PAD_ENABLE 0x00000004
  32. #define DMA_TX_LOOPBACK 0x00000100
  33. #define DMA_TX_FLOW_ENABLE 0x00000200
  34. #define DMA_TX_CSUM_IP 0x00010000
  35. #define DMA_TX_CSUM_TCP 0x00020000
  36. #define DMA_TX_CSUM_UDP 0x00040000
  37. #define DMA_TX_BURST_SIZE 0x3F000000
  38. #define KS_DMA_RX_CTRL 0x0004
  39. #define DMA_RX_ENABLE 0x00000001
  40. #define KS884X_DMA_RX_MULTICAST 0x00000002
  41. #define DMA_RX_PROMISCUOUS 0x00000004
  42. #define DMA_RX_ERROR 0x00000008
  43. #define DMA_RX_UNICAST 0x00000010
  44. #define DMA_RX_ALL_MULTICAST 0x00000020
  45. #define DMA_RX_BROADCAST 0x00000040
  46. #define DMA_RX_FLOW_ENABLE 0x00000200
  47. #define DMA_RX_CSUM_IP 0x00010000
  48. #define DMA_RX_CSUM_TCP 0x00020000
  49. #define DMA_RX_CSUM_UDP 0x00040000
  50. #define DMA_RX_BURST_SIZE 0x3F000000
  51. #define DMA_BURST_SHIFT 24
  52. #define DMA_BURST_DEFAULT 8
  53. #define KS_DMA_TX_START 0x0008
  54. #define KS_DMA_RX_START 0x000C
  55. #define DMA_START 0x00000001
  56. #define KS_DMA_TX_ADDR 0x0010
  57. #define KS_DMA_RX_ADDR 0x0014
  58. #define DMA_ADDR_LIST_MASK 0xFFFFFFFC
  59. #define DMA_ADDR_LIST_SHIFT 2
  60. /* MTR0 */
  61. #define KS884X_MULTICAST_0_OFFSET 0x0020
  62. #define KS884X_MULTICAST_1_OFFSET 0x0021
  63. #define KS884X_MULTICAST_2_OFFSET 0x0022
  64. #define KS884x_MULTICAST_3_OFFSET 0x0023
  65. /* MTR1 */
  66. #define KS884X_MULTICAST_4_OFFSET 0x0024
  67. #define KS884X_MULTICAST_5_OFFSET 0x0025
  68. #define KS884X_MULTICAST_6_OFFSET 0x0026
  69. #define KS884X_MULTICAST_7_OFFSET 0x0027
  70. /* Interrupt Registers */
  71. /* INTEN */
  72. #define KS884X_INTERRUPTS_ENABLE 0x0028
  73. /* INTST */
  74. #define KS884X_INTERRUPTS_STATUS 0x002C
  75. #define KS884X_INT_RX_STOPPED 0x02000000
  76. #define KS884X_INT_TX_STOPPED 0x04000000
  77. #define KS884X_INT_RX_OVERRUN 0x08000000
  78. #define KS884X_INT_TX_EMPTY 0x10000000
  79. #define KS884X_INT_RX 0x20000000
  80. #define KS884X_INT_TX 0x40000000
  81. #define KS884X_INT_PHY 0x80000000
  82. #define KS884X_INT_RX_MASK \
  83. (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
  84. #define KS884X_INT_TX_MASK \
  85. (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
  86. #define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
  87. /* MAC Additional Station Address */
  88. /* MAAL0 */
  89. #define KS_ADD_ADDR_0_LO 0x0080
  90. /* MAAH0 */
  91. #define KS_ADD_ADDR_0_HI 0x0084
  92. /* MAAL1 */
  93. #define KS_ADD_ADDR_1_LO 0x0088
  94. /* MAAH1 */
  95. #define KS_ADD_ADDR_1_HI 0x008C
  96. /* MAAL2 */
  97. #define KS_ADD_ADDR_2_LO 0x0090
  98. /* MAAH2 */
  99. #define KS_ADD_ADDR_2_HI 0x0094
  100. /* MAAL3 */
  101. #define KS_ADD_ADDR_3_LO 0x0098
  102. /* MAAH3 */
  103. #define KS_ADD_ADDR_3_HI 0x009C
  104. /* MAAL4 */
  105. #define KS_ADD_ADDR_4_LO 0x00A0
  106. /* MAAH4 */
  107. #define KS_ADD_ADDR_4_HI 0x00A4
  108. /* MAAL5 */
  109. #define KS_ADD_ADDR_5_LO 0x00A8
  110. /* MAAH5 */
  111. #define KS_ADD_ADDR_5_HI 0x00AC
  112. /* MAAL6 */
  113. #define KS_ADD_ADDR_6_LO 0x00B0
  114. /* MAAH6 */
  115. #define KS_ADD_ADDR_6_HI 0x00B4
  116. /* MAAL7 */
  117. #define KS_ADD_ADDR_7_LO 0x00B8
  118. /* MAAH7 */
  119. #define KS_ADD_ADDR_7_HI 0x00BC
  120. /* MAAL8 */
  121. #define KS_ADD_ADDR_8_LO 0x00C0
  122. /* MAAH8 */
  123. #define KS_ADD_ADDR_8_HI 0x00C4
  124. /* MAAL9 */
  125. #define KS_ADD_ADDR_9_LO 0x00C8
  126. /* MAAH9 */
  127. #define KS_ADD_ADDR_9_HI 0x00CC
  128. /* MAAL10 */
  129. #define KS_ADD_ADDR_A_LO 0x00D0
  130. /* MAAH10 */
  131. #define KS_ADD_ADDR_A_HI 0x00D4
  132. /* MAAL11 */
  133. #define KS_ADD_ADDR_B_LO 0x00D8
  134. /* MAAH11 */
  135. #define KS_ADD_ADDR_B_HI 0x00DC
  136. /* MAAL12 */
  137. #define KS_ADD_ADDR_C_LO 0x00E0
  138. /* MAAH12 */
  139. #define KS_ADD_ADDR_C_HI 0x00E4
  140. /* MAAL13 */
  141. #define KS_ADD_ADDR_D_LO 0x00E8
  142. /* MAAH13 */
  143. #define KS_ADD_ADDR_D_HI 0x00EC
  144. /* MAAL14 */
  145. #define KS_ADD_ADDR_E_LO 0x00F0
  146. /* MAAH14 */
  147. #define KS_ADD_ADDR_E_HI 0x00F4
  148. /* MAAL15 */
  149. #define KS_ADD_ADDR_F_LO 0x00F8
  150. /* MAAH15 */
  151. #define KS_ADD_ADDR_F_HI 0x00FC
  152. #define ADD_ADDR_HI_MASK 0x0000FFFF
  153. #define ADD_ADDR_ENABLE 0x80000000
  154. #define ADD_ADDR_INCR 8
  155. /* Miscellaneous Registers */
  156. /* MARL */
  157. #define KS884X_ADDR_0_OFFSET 0x0200
  158. #define KS884X_ADDR_1_OFFSET 0x0201
  159. /* MARM */
  160. #define KS884X_ADDR_2_OFFSET 0x0202
  161. #define KS884X_ADDR_3_OFFSET 0x0203
  162. /* MARH */
  163. #define KS884X_ADDR_4_OFFSET 0x0204
  164. #define KS884X_ADDR_5_OFFSET 0x0205
  165. /* OBCR */
  166. #define KS884X_BUS_CTRL_OFFSET 0x0210
  167. #define BUS_SPEED_125_MHZ 0x0000
  168. #define BUS_SPEED_62_5_MHZ 0x0001
  169. #define BUS_SPEED_41_66_MHZ 0x0002
  170. #define BUS_SPEED_25_MHZ 0x0003
  171. /* EEPCR */
  172. #define KS884X_EEPROM_CTRL_OFFSET 0x0212
  173. #define EEPROM_CHIP_SELECT 0x0001
  174. #define EEPROM_SERIAL_CLOCK 0x0002
  175. #define EEPROM_DATA_OUT 0x0004
  176. #define EEPROM_DATA_IN 0x0008
  177. #define EEPROM_ACCESS_ENABLE 0x0010
  178. /* MBIR */
  179. #define KS884X_MEM_INFO_OFFSET 0x0214
  180. #define RX_MEM_TEST_FAILED 0x0008
  181. #define RX_MEM_TEST_FINISHED 0x0010
  182. #define TX_MEM_TEST_FAILED 0x0800
  183. #define TX_MEM_TEST_FINISHED 0x1000
  184. /* GCR */
  185. #define KS884X_GLOBAL_CTRL_OFFSET 0x0216
  186. #define GLOBAL_SOFTWARE_RESET 0x0001
  187. #define KS8841_POWER_MANAGE_OFFSET 0x0218
  188. /* WFCR */
  189. #define KS8841_WOL_CTRL_OFFSET 0x021A
  190. #define KS8841_WOL_MAGIC_ENABLE 0x0080
  191. #define KS8841_WOL_FRAME3_ENABLE 0x0008
  192. #define KS8841_WOL_FRAME2_ENABLE 0x0004
  193. #define KS8841_WOL_FRAME1_ENABLE 0x0002
  194. #define KS8841_WOL_FRAME0_ENABLE 0x0001
  195. /* WF0 */
  196. #define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
  197. #define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
  198. #define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
  199. /* IACR */
  200. #define KS884X_IACR_P 0x04A0
  201. #define KS884X_IACR_OFFSET KS884X_IACR_P
  202. /* IADR1 */
  203. #define KS884X_IADR1_P 0x04A2
  204. #define KS884X_IADR2_P 0x04A4
  205. #define KS884X_IADR3_P 0x04A6
  206. #define KS884X_IADR4_P 0x04A8
  207. #define KS884X_IADR5_P 0x04AA
  208. #define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
  209. #define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
  210. #define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
  211. #define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
  212. #define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
  213. #define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
  214. #define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
  215. #define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
  216. #define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
  217. #define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
  218. #define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
  219. /* P1MBCR */
  220. #define KS884X_P1MBCR_P 0x04D0
  221. #define KS884X_P1MBSR_P 0x04D2
  222. #define KS884X_PHY1ILR_P 0x04D4
  223. #define KS884X_PHY1IHR_P 0x04D6
  224. #define KS884X_P1ANAR_P 0x04D8
  225. #define KS884X_P1ANLPR_P 0x04DA
  226. /* P2MBCR */
  227. #define KS884X_P2MBCR_P 0x04E0
  228. #define KS884X_P2MBSR_P 0x04E2
  229. #define KS884X_PHY2ILR_P 0x04E4
  230. #define KS884X_PHY2IHR_P 0x04E6
  231. #define KS884X_P2ANAR_P 0x04E8
  232. #define KS884X_P2ANLPR_P 0x04EA
  233. #define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
  234. #define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
  235. #define KS884X_PHY_CTRL_OFFSET 0x00
  236. #define KS884X_PHY_STATUS_OFFSET 0x02
  237. #define KS884X_PHY_ID_1_OFFSET 0x04
  238. #define KS884X_PHY_ID_2_OFFSET 0x06
  239. #define KS884X_PHY_AUTO_NEG_OFFSET 0x08
  240. #define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
  241. /* P1VCT */
  242. #define KS884X_P1VCT_P 0x04F0
  243. #define KS884X_P1PHYCTRL_P 0x04F2
  244. /* P2VCT */
  245. #define KS884X_P2VCT_P 0x04F4
  246. #define KS884X_P2PHYCTRL_P 0x04F6
  247. #define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
  248. #define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
  249. #define KS884X_PHY_LINK_MD_OFFSET 0x00
  250. #define PHY_START_CABLE_DIAG 0x8000
  251. #define PHY_CABLE_DIAG_RESULT 0x6000
  252. #define PHY_CABLE_STAT_NORMAL 0x0000
  253. #define PHY_CABLE_STAT_OPEN 0x2000
  254. #define PHY_CABLE_STAT_SHORT 0x4000
  255. #define PHY_CABLE_STAT_FAILED 0x6000
  256. #define PHY_CABLE_10M_SHORT 0x1000
  257. #define PHY_CABLE_FAULT_COUNTER 0x01FF
  258. #define KS884X_PHY_PHY_CTRL_OFFSET 0x02
  259. #define PHY_STAT_REVERSED_POLARITY 0x0020
  260. #define PHY_STAT_MDIX 0x0010
  261. #define PHY_FORCE_LINK 0x0008
  262. #define PHY_POWER_SAVING_DISABLE 0x0004
  263. #define PHY_REMOTE_LOOPBACK 0x0002
  264. /* SIDER */
  265. #define KS884X_SIDER_P 0x0400
  266. #define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
  267. #define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
  268. #define REG_FAMILY_ID 0x88
  269. #define REG_CHIP_ID_41 0x8810
  270. #define REG_CHIP_ID_42 0x8800
  271. #define KS884X_CHIP_ID_MASK_41 0xFF10
  272. #define KS884X_CHIP_ID_MASK 0xFFF0
  273. #define KS884X_CHIP_ID_SHIFT 4
  274. #define KS884X_REVISION_MASK 0x000E
  275. #define KS884X_REVISION_SHIFT 1
  276. #define KS8842_START 0x0001
  277. #define CHIP_IP_41_M 0x8810
  278. #define CHIP_IP_42_M 0x8800
  279. #define CHIP_IP_61_M 0x8890
  280. #define CHIP_IP_62_M 0x8880
  281. #define CHIP_IP_41_P 0x8850
  282. #define CHIP_IP_42_P 0x8840
  283. #define CHIP_IP_61_P 0x88D0
  284. #define CHIP_IP_62_P 0x88C0
  285. /* SGCR1 */
  286. #define KS8842_SGCR1_P 0x0402
  287. #define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
  288. #define SWITCH_PASS_ALL 0x8000
  289. #define SWITCH_TX_FLOW_CTRL 0x2000
  290. #define SWITCH_RX_FLOW_CTRL 0x1000
  291. #define SWITCH_CHECK_LENGTH 0x0800
  292. #define SWITCH_AGING_ENABLE 0x0400
  293. #define SWITCH_FAST_AGING 0x0200
  294. #define SWITCH_AGGR_BACKOFF 0x0100
  295. #define SWITCH_PASS_PAUSE 0x0008
  296. #define SWITCH_LINK_AUTO_AGING 0x0001
  297. /* SGCR2 */
  298. #define KS8842_SGCR2_P 0x0404
  299. #define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
  300. #define SWITCH_VLAN_ENABLE 0x8000
  301. #define SWITCH_IGMP_SNOOP 0x4000
  302. #define IPV6_MLD_SNOOP_ENABLE 0x2000
  303. #define IPV6_MLD_SNOOP_OPTION 0x1000
  304. #define PRIORITY_SCHEME_SELECT 0x0800
  305. #define SWITCH_MIRROR_RX_TX 0x0100
  306. #define UNICAST_VLAN_BOUNDARY 0x0080
  307. #define MULTICAST_STORM_DISABLE 0x0040
  308. #define SWITCH_BACK_PRESSURE 0x0020
  309. #define FAIR_FLOW_CTRL 0x0010
  310. #define NO_EXC_COLLISION_DROP 0x0008
  311. #define SWITCH_HUGE_PACKET 0x0004
  312. #define SWITCH_LEGAL_PACKET 0x0002
  313. #define SWITCH_BUF_RESERVE 0x0001
  314. /* SGCR3 */
  315. #define KS8842_SGCR3_P 0x0406
  316. #define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
  317. #define BROADCAST_STORM_RATE_LO 0xFF00
  318. #define SWITCH_REPEATER 0x0080
  319. #define SWITCH_HALF_DUPLEX 0x0040
  320. #define SWITCH_FLOW_CTRL 0x0020
  321. #define SWITCH_10_MBIT 0x0010
  322. #define SWITCH_REPLACE_NULL_VID 0x0008
  323. #define BROADCAST_STORM_RATE_HI 0x0007
  324. #define BROADCAST_STORM_RATE 0x07FF
  325. /* SGCR4 */
  326. #define KS8842_SGCR4_P 0x0408
  327. /* SGCR5 */
  328. #define KS8842_SGCR5_P 0x040A
  329. #define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
  330. #define LED_MODE 0x8200
  331. #define LED_SPEED_DUPLEX_ACT 0x0000
  332. #define LED_SPEED_DUPLEX_LINK_ACT 0x8000
  333. #define LED_DUPLEX_10_100 0x0200
  334. /* SGCR6 */
  335. #define KS8842_SGCR6_P 0x0410
  336. #define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
  337. #define KS8842_PRIORITY_MASK 3
  338. #define KS8842_PRIORITY_SHIFT 2
  339. /* SGCR7 */
  340. #define KS8842_SGCR7_P 0x0412
  341. #define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
  342. #define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
  343. #define SWITCH_UNK_DEF_PORT_3 0x0004
  344. #define SWITCH_UNK_DEF_PORT_2 0x0002
  345. #define SWITCH_UNK_DEF_PORT_1 0x0001
  346. /* MACAR1 */
  347. #define KS8842_MACAR1_P 0x0470
  348. #define KS8842_MACAR2_P 0x0472
  349. #define KS8842_MACAR3_P 0x0474
  350. #define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
  351. #define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
  352. #define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
  353. #define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
  354. #define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
  355. #define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
  356. /* TOSR1 */
  357. #define KS8842_TOSR1_P 0x0480
  358. #define KS8842_TOSR2_P 0x0482
  359. #define KS8842_TOSR3_P 0x0484
  360. #define KS8842_TOSR4_P 0x0486
  361. #define KS8842_TOSR5_P 0x0488
  362. #define KS8842_TOSR6_P 0x048A
  363. #define KS8842_TOSR7_P 0x0490
  364. #define KS8842_TOSR8_P 0x0492
  365. #define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
  366. #define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
  367. #define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
  368. #define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
  369. #define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
  370. #define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
  371. #define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
  372. #define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
  373. /* P1CR1 */
  374. #define KS8842_P1CR1_P 0x0500
  375. #define KS8842_P1CR2_P 0x0502
  376. #define KS8842_P1VIDR_P 0x0504
  377. #define KS8842_P1CR3_P 0x0506
  378. #define KS8842_P1IRCR_P 0x0508
  379. #define KS8842_P1ERCR_P 0x050A
  380. #define KS884X_P1SCSLMD_P 0x0510
  381. #define KS884X_P1CR4_P 0x0512
  382. #define KS884X_P1SR_P 0x0514
  383. /* P2CR1 */
  384. #define KS8842_P2CR1_P 0x0520
  385. #define KS8842_P2CR2_P 0x0522
  386. #define KS8842_P2VIDR_P 0x0524
  387. #define KS8842_P2CR3_P 0x0526
  388. #define KS8842_P2IRCR_P 0x0528
  389. #define KS8842_P2ERCR_P 0x052A
  390. #define KS884X_P2SCSLMD_P 0x0530
  391. #define KS884X_P2CR4_P 0x0532
  392. #define KS884X_P2SR_P 0x0534
  393. /* P3CR1 */
  394. #define KS8842_P3CR1_P 0x0540
  395. #define KS8842_P3CR2_P 0x0542
  396. #define KS8842_P3VIDR_P 0x0544
  397. #define KS8842_P3CR3_P 0x0546
  398. #define KS8842_P3IRCR_P 0x0548
  399. #define KS8842_P3ERCR_P 0x054A
  400. #define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
  401. #define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
  402. #define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
  403. #define PORT_CTRL_ADDR(port, addr) \
  404. (addr = KS8842_PORT_1_CTRL_1 + (port) * \
  405. (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
  406. #define KS8842_PORT_CTRL_1_OFFSET 0x00
  407. #define PORT_BROADCAST_STORM 0x0080
  408. #define PORT_DIFFSERV_ENABLE 0x0040
  409. #define PORT_802_1P_ENABLE 0x0020
  410. #define PORT_BASED_PRIORITY_MASK 0x0018
  411. #define PORT_BASED_PRIORITY_BASE 0x0003
  412. #define PORT_BASED_PRIORITY_SHIFT 3
  413. #define PORT_BASED_PRIORITY_0 0x0000
  414. #define PORT_BASED_PRIORITY_1 0x0008
  415. #define PORT_BASED_PRIORITY_2 0x0010
  416. #define PORT_BASED_PRIORITY_3 0x0018
  417. #define PORT_INSERT_TAG 0x0004
  418. #define PORT_REMOVE_TAG 0x0002
  419. #define PORT_PRIO_QUEUE_ENABLE 0x0001
  420. #define KS8842_PORT_CTRL_2_OFFSET 0x02
  421. #define PORT_INGRESS_VLAN_FILTER 0x4000
  422. #define PORT_DISCARD_NON_VID 0x2000
  423. #define PORT_FORCE_FLOW_CTRL 0x1000
  424. #define PORT_BACK_PRESSURE 0x0800
  425. #define PORT_TX_ENABLE 0x0400
  426. #define PORT_RX_ENABLE 0x0200
  427. #define PORT_LEARN_DISABLE 0x0100
  428. #define PORT_MIRROR_SNIFFER 0x0080
  429. #define PORT_MIRROR_RX 0x0040
  430. #define PORT_MIRROR_TX 0x0020
  431. #define PORT_USER_PRIORITY_CEILING 0x0008
  432. #define PORT_VLAN_MEMBERSHIP 0x0007
  433. #define KS8842_PORT_CTRL_VID_OFFSET 0x04
  434. #define PORT_DEFAULT_VID 0x0001
  435. #define KS8842_PORT_CTRL_3_OFFSET 0x06
  436. #define PORT_INGRESS_LIMIT_MODE 0x000C
  437. #define PORT_INGRESS_ALL 0x0000
  438. #define PORT_INGRESS_UNICAST 0x0004
  439. #define PORT_INGRESS_MULTICAST 0x0008
  440. #define PORT_INGRESS_BROADCAST 0x000C
  441. #define PORT_COUNT_IFG 0x0002
  442. #define PORT_COUNT_PREAMBLE 0x0001
  443. #define KS8842_PORT_IN_RATE_OFFSET 0x08
  444. #define KS8842_PORT_OUT_RATE_OFFSET 0x0A
  445. #define PORT_PRIORITY_RATE 0x0F
  446. #define PORT_PRIORITY_RATE_SHIFT 4
  447. #define KS884X_PORT_LINK_MD 0x10
  448. #define PORT_CABLE_10M_SHORT 0x8000
  449. #define PORT_CABLE_DIAG_RESULT 0x6000
  450. #define PORT_CABLE_STAT_NORMAL 0x0000
  451. #define PORT_CABLE_STAT_OPEN 0x2000
  452. #define PORT_CABLE_STAT_SHORT 0x4000
  453. #define PORT_CABLE_STAT_FAILED 0x6000
  454. #define PORT_START_CABLE_DIAG 0x1000
  455. #define PORT_FORCE_LINK 0x0800
  456. #define PORT_POWER_SAVING_DISABLE 0x0400
  457. #define PORT_PHY_REMOTE_LOOPBACK 0x0200
  458. #define PORT_CABLE_FAULT_COUNTER 0x01FF
  459. #define KS884X_PORT_CTRL_4_OFFSET 0x12
  460. #define PORT_LED_OFF 0x8000
  461. #define PORT_TX_DISABLE 0x4000
  462. #define PORT_AUTO_NEG_RESTART 0x2000
  463. #define PORT_REMOTE_FAULT_DISABLE 0x1000
  464. #define PORT_POWER_DOWN 0x0800
  465. #define PORT_AUTO_MDIX_DISABLE 0x0400
  466. #define PORT_FORCE_MDIX 0x0200
  467. #define PORT_LOOPBACK 0x0100
  468. #define PORT_AUTO_NEG_ENABLE 0x0080
  469. #define PORT_FORCE_100_MBIT 0x0040
  470. #define PORT_FORCE_FULL_DUPLEX 0x0020
  471. #define PORT_AUTO_NEG_SYM_PAUSE 0x0010
  472. #define PORT_AUTO_NEG_100BTX_FD 0x0008
  473. #define PORT_AUTO_NEG_100BTX 0x0004
  474. #define PORT_AUTO_NEG_10BT_FD 0x0002
  475. #define PORT_AUTO_NEG_10BT 0x0001
  476. #define KS884X_PORT_STATUS_OFFSET 0x14
  477. #define PORT_HP_MDIX 0x8000
  478. #define PORT_REVERSED_POLARITY 0x2000
  479. #define PORT_RX_FLOW_CTRL 0x0800
  480. #define PORT_TX_FLOW_CTRL 0x1000
  481. #define PORT_STATUS_SPEED_100MBIT 0x0400
  482. #define PORT_STATUS_FULL_DUPLEX 0x0200
  483. #define PORT_REMOTE_FAULT 0x0100
  484. #define PORT_MDIX_STATUS 0x0080
  485. #define PORT_AUTO_NEG_COMPLETE 0x0040
  486. #define PORT_STATUS_LINK_GOOD 0x0020
  487. #define PORT_REMOTE_SYM_PAUSE 0x0010
  488. #define PORT_REMOTE_100BTX_FD 0x0008
  489. #define PORT_REMOTE_100BTX 0x0004
  490. #define PORT_REMOTE_10BT_FD 0x0002
  491. #define PORT_REMOTE_10BT 0x0001
  492. /*
  493. #define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
  494. #define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
  495. #define STATIC_MAC_TABLE_VALID 00-00080000-00000000
  496. #define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
  497. #define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
  498. #define STATIC_MAC_TABLE_FID 00-03C00000-00000000
  499. */
  500. #define STATIC_MAC_TABLE_ADDR 0x0000FFFF
  501. #define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
  502. #define STATIC_MAC_TABLE_VALID 0x00080000
  503. #define STATIC_MAC_TABLE_OVERRIDE 0x00100000
  504. #define STATIC_MAC_TABLE_USE_FID 0x00200000
  505. #define STATIC_MAC_TABLE_FID 0x03C00000
  506. #define STATIC_MAC_FWD_PORTS_SHIFT 16
  507. #define STATIC_MAC_FID_SHIFT 22
  508. /*
  509. #define VLAN_TABLE_VID 00-00000000-00000FFF
  510. #define VLAN_TABLE_FID 00-00000000-0000F000
  511. #define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
  512. #define VLAN_TABLE_VALID 00-00000000-00080000
  513. */
  514. #define VLAN_TABLE_VID 0x00000FFF
  515. #define VLAN_TABLE_FID 0x0000F000
  516. #define VLAN_TABLE_MEMBERSHIP 0x00070000
  517. #define VLAN_TABLE_VALID 0x00080000
  518. #define VLAN_TABLE_FID_SHIFT 12
  519. #define VLAN_TABLE_MEMBERSHIP_SHIFT 16
  520. /*
  521. #define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
  522. #define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
  523. #define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
  524. #define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
  525. #define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
  526. #define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
  527. #define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
  528. #define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
  529. */
  530. #define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
  531. #define DYNAMIC_MAC_TABLE_FID 0x000F0000
  532. #define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
  533. #define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
  534. #define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
  535. #define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
  536. #define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
  537. #define DYNAMIC_MAC_TABLE_RESERVED 0x78
  538. #define DYNAMIC_MAC_TABLE_NOT_READY 0x80
  539. #define DYNAMIC_MAC_FID_SHIFT 16
  540. #define DYNAMIC_MAC_SRC_PORT_SHIFT 20
  541. #define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
  542. #define DYNAMIC_MAC_ENTRIES_SHIFT 24
  543. #define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
  544. /*
  545. #define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
  546. #define MIB_COUNTER_VALID 00-00000000-40000000
  547. #define MIB_COUNTER_OVERFLOW 00-00000000-80000000
  548. */
  549. #define MIB_COUNTER_VALUE 0x3FFFFFFF
  550. #define MIB_COUNTER_VALID 0x40000000
  551. #define MIB_COUNTER_OVERFLOW 0x80000000
  552. #define MIB_PACKET_DROPPED 0x0000FFFF
  553. #define KS_MIB_PACKET_DROPPED_TX_0 0x100
  554. #define KS_MIB_PACKET_DROPPED_TX_1 0x101
  555. #define KS_MIB_PACKET_DROPPED_TX 0x102
  556. #define KS_MIB_PACKET_DROPPED_RX_0 0x103
  557. #define KS_MIB_PACKET_DROPPED_RX_1 0x104
  558. #define KS_MIB_PACKET_DROPPED_RX 0x105
  559. /* Change default LED mode. */
  560. #define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
  561. #define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
  562. #define MAX_ETHERNET_BODY_SIZE 1500
  563. #define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
  564. #define MAX_ETHERNET_PACKET_SIZE \
  565. (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
  566. #define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
  567. #define MAX_RX_BUF_SIZE (1912 + 4)
  568. #define ADDITIONAL_ENTRIES 16
  569. #define MAX_MULTICAST_LIST 32
  570. #define HW_MULTICAST_SIZE 8
  571. #define HW_TO_DEV_PORT(port) (port - 1)
/* Link state of a port as tracked by the driver. */
enum {
	media_connected,
	media_disconnected
};
/*
 * Indices into the driver's OID error counter array.
 * NOTE(review): "UNKOWN" is a historical typo; the identifier is kept
 * unchanged because code outside this chunk may reference it.
 */
enum {
	OID_COUNTER_UNKOWN,

	OID_COUNTER_FIRST,

	/* total transmit errors */
	OID_COUNTER_XMIT_ERROR,

	/* total receive errors */
	OID_COUNTER_RCV_ERROR,

	OID_COUNTER_LAST
};
  585. /*
  586. * Hardware descriptor definitions
  587. */
  588. #define DESC_ALIGNMENT 16
  589. #define BUFFER_ALIGNMENT 8
  590. #define NUM_OF_RX_DESC 64
  591. #define NUM_OF_TX_DESC 64
  592. #define KS_DESC_RX_FRAME_LEN 0x000007FF
  593. #define KS_DESC_RX_FRAME_TYPE 0x00008000
  594. #define KS_DESC_RX_ERROR_CRC 0x00010000
  595. #define KS_DESC_RX_ERROR_RUNT 0x00020000
  596. #define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
  597. #define KS_DESC_RX_ERROR_PHY 0x00080000
  598. #define KS884X_DESC_RX_PORT_MASK 0x00300000
  599. #define KS_DESC_RX_MULTICAST 0x01000000
  600. #define KS_DESC_RX_ERROR 0x02000000
  601. #define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
  602. #define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
  603. #define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
  604. #define KS_DESC_RX_LAST 0x20000000
  605. #define KS_DESC_RX_FIRST 0x40000000
  606. #define KS_DESC_RX_ERROR_COND \
  607. (KS_DESC_RX_ERROR_CRC | \
  608. KS_DESC_RX_ERROR_RUNT | \
  609. KS_DESC_RX_ERROR_PHY | \
  610. KS_DESC_RX_ERROR_TOO_LONG)
  611. #define KS_DESC_HW_OWNED 0x80000000
  612. #define KS_DESC_BUF_SIZE 0x000007FF
  613. #define KS884X_DESC_TX_PORT_MASK 0x00300000
  614. #define KS_DESC_END_OF_RING 0x02000000
  615. #define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
  616. #define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
  617. #define KS_DESC_TX_CSUM_GEN_IP 0x10000000
  618. #define KS_DESC_TX_LAST 0x20000000
  619. #define KS_DESC_TX_FIRST 0x40000000
  620. #define KS_DESC_TX_INTERRUPT 0x80000000
  621. #define KS_DESC_PORT_SHIFT 20
  622. #define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
  623. #define KS_DESC_TX_MASK \
  624. (KS_DESC_TX_INTERRUPT | \
  625. KS_DESC_TX_FIRST | \
  626. KS_DESC_TX_LAST | \
  627. KS_DESC_TX_CSUM_GEN_IP | \
  628. KS_DESC_TX_CSUM_GEN_TCP | \
  629. KS_DESC_TX_CSUM_GEN_UDP | \
  630. KS_DESC_BUF_SIZE)
/*
 * struct ksz_desc_rx_stat - receive descriptor status/control word.
 *
 * Bitfield view of the 32-bit RX descriptor control word.  The two
 * #ifdef branches list the fields in opposite order so each flag lands
 * on the same hardware bit regardless of the host's bitfield order.
 * Bit positions correspond to the KS_DESC_RX_* / KS_DESC_HW_OWNED
 * masks defined above (e.g. frame_len:11 <-> KS_DESC_RX_FRAME_LEN).
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
	u32 first_desc:1;	/* KS_DESC_RX_FIRST */
	u32 last_desc:1;	/* KS_DESC_RX_LAST */
	u32 csum_err_ip:1;	/* KS_DESC_RX_ERROR_CSUM_IP */
	u32 csum_err_tcp:1;	/* KS_DESC_RX_ERROR_CSUM_TCP */
	u32 csum_err_udp:1;	/* KS_DESC_RX_ERROR_CSUM_UDP */
	u32 error:1;		/* KS_DESC_RX_ERROR */
	u32 multicast:1;	/* KS_DESC_RX_MULTICAST */
	u32 src_port:4;		/* low 2 bits = KS884X_DESC_RX_PORT_MASK */
	u32 err_phy:1;		/* KS_DESC_RX_ERROR_PHY */
	u32 err_too_long:1;	/* KS_DESC_RX_ERROR_TOO_LONG */
	u32 err_runt:1;		/* KS_DESC_RX_ERROR_RUNT */
	u32 err_crc:1;		/* KS_DESC_RX_ERROR_CRC */
	u32 frame_type:1;	/* KS_DESC_RX_FRAME_TYPE */
	u32 reserved1:4;
	u32 frame_len:11;	/* KS_DESC_RX_FRAME_LEN */
#else
	u32 frame_len:11;
	u32 reserved1:4;
	u32 frame_type:1;
	u32 err_crc:1;
	u32 err_runt:1;
	u32 err_too_long:1;
	u32 err_phy:1;
	u32 src_port:4;
	u32 multicast:1;
	u32 error:1;
	u32 csum_err_udp:1;
	u32 csum_err_tcp:1;
	u32 csum_err_ip:1;
	u32 last_desc:1;
	u32 first_desc:1;
	u32 hw_owned:1;
#endif
};
/*
 * struct ksz_desc_tx_stat - transmit descriptor status word.
 *
 * Only the ownership flag (bit 31, KS_DESC_HW_OWNED) is meaningful;
 * the remaining 31 bits are reserved.
 */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;		/* KS_DESC_HW_OWNED */
	u32 reserved1:31;
#else
	u32 reserved1:31;
	u32 hw_owned:1;
#endif
};
/*
 * struct ksz_desc_rx_buf - receive descriptor buffer word.
 *
 * Bitfield view of the 32-bit RX buffer control word: buffer size in
 * the low 11 bits (KS_DESC_BUF_SIZE) and the end-of-ring flag at
 * bit 25 (KS_DESC_END_OF_RING).
 */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 reserved4:6;
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 reserved3:14;
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
#else
	u32 buf_size:11;
	u32 reserved3:14;
	u32 end_of_ring:1;
	u32 reserved4:6;
#endif
};
/*
 * struct ksz_desc_tx_buf - transmit descriptor buffer/control word.
 *
 * Bitfield view of the 32-bit TX buffer control word.  Bit positions
 * correspond to the KS_DESC_TX_* / KS_DESC_BUF_SIZE /
 * KS_DESC_END_OF_RING masks defined above.
 */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 intr:1;		/* KS_DESC_TX_INTERRUPT */
	u32 first_seg:1;	/* KS_DESC_TX_FIRST */
	u32 last_seg:1;		/* KS_DESC_TX_LAST */
	u32 csum_gen_ip:1;	/* KS_DESC_TX_CSUM_GEN_IP */
	u32 csum_gen_tcp:1;	/* KS_DESC_TX_CSUM_GEN_TCP */
	u32 csum_gen_udp:1;	/* KS_DESC_TX_CSUM_GEN_UDP */
	u32 end_of_ring:1;	/* KS_DESC_END_OF_RING */
	u32 reserved4:1;
	u32 dest_port:4;	/* low 2 bits = KS884X_DESC_TX_PORT_MASK */
	u32 reserved3:9;
	u32 buf_size:11;	/* KS_DESC_BUF_SIZE */
#else
	u32 buf_size:11;
	u32 reserved3:9;
	u32 dest_port:4;
	u32 reserved4:1;
	u32 end_of_ring:1;
	u32 csum_gen_udp:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_ip:1;
	u32 last_seg:1;
	u32 first_seg:1;
	u32 intr:1;
#endif
};
/* Descriptor status word, viewed as RX status, TX status, or raw u32. */
union desc_stat {
	struct ksz_desc_rx_stat rx;
	struct ksz_desc_tx_stat tx;
	u32 data;
};
/* Descriptor buffer word, viewed as RX buffer, TX buffer, or raw u32. */
union desc_buf {
	struct ksz_desc_rx_buf rx;
	struct ksz_desc_tx_buf tx;
	u32 data;
};
/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @addr: Physical address of memory buffer.
 * @next: Pointer to next hardware descriptor.
 *
 * This is the exact in-memory layout the DMA engine reads; do not
 * reorder or resize the fields.
 */
struct ksz_hw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 addr;
	u32 next;
};
/**
 * struct ksz_sw_desc - Software descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @buf_size: Current buffers size value in hardware descriptor.
 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;
};
/**
 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 * @skb: Associated socket buffer.
 * @dma: Associated physical DMA address.
 * @len: Actual length used.
 */
struct ksz_dma_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	int len;
};
/**
 * struct ksz_desc - Descriptor structure
 * @phw: Hardware descriptor pointer to uncached physical memory.
 * @sw: Cached memory to hold hardware descriptor values for
 *	manipulation.
 * @dma_buf: Operating system dependent data structure to hold physical
 *	memory buffer allocation information.
 */
struct ksz_desc {
	struct ksz_hw_desc *phw;
	struct ksz_sw_desc sw;
	struct ksz_dma_buf dma_buf;
};

/* Accessor for the OS-dependent DMA buffer embedded in a descriptor. */
#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
/**
 * struct ksz_desc_info - Descriptor information data structure
 * @ring: First descriptor in the ring.
 * @cur: Current descriptor being manipulated.
 * @ring_virt: First hardware descriptor in the ring.
 * @ring_phys: The physical address of the first descriptor of the ring.
 * @size: Size of hardware descriptor.
 * @alloc: Number of descriptors allocated.
 * @avail: Number of descriptors available for use.
 * @last: Index for last descriptor released to hardware.
 * @next: Index for next descriptor available for use.
 * @mask: Mask for index wrapping.
 */
struct ksz_desc_info {
	struct ksz_desc *ring;
	struct ksz_desc *cur;
	struct ksz_hw_desc *ring_virt;
	u32 ring_phys;
	int size;
	int alloc;
	int avail;
	int last;
	int next;
	int mask;
};
/*
 * KSZ8842 switch definitions
 */

/*
 * Table selectors for the switch's internal tables (presumably used
 * with the indirect access registers KS884X_ACC_CTRL_* defined above
 * -- confirm against the access code elsewhere in this file).
 */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};
  810. #define LEARNED_MAC_TABLE_ENTRIES 1024
  811. #define STATIC_MAC_TABLE_ENTRIES 8
/**
 * struct ksz_mac_table - Static MAC table data structure
 * @mac_addr: MAC address to filter.
 * @vid: VID value.
 * @fid: FID value.
 * @ports: Port membership.
 * @override: Override setting.
 * @use_fid: FID use setting.
 * @valid: Valid setting indicating the entry is being used.
 */
struct ksz_mac_table {
	u8 mac_addr[ETH_ALEN];
	u16 vid;
	u8 fid;
	u8 ports;
	u8 override:1;
	u8 use_fid:1;
	u8 valid:1;
};
  831. #define VLAN_TABLE_ENTRIES 16
/**
 * struct ksz_vlan_table - VLAN table data structure
 * @vid: VID value.
 * @fid: FID value.
 * @member: Port membership.
 */
struct ksz_vlan_table {
	u16 vid;
	u8 fid;
	u8 member;
};
  843. #define DIFFSERV_ENTRIES 64
  844. #define PRIO_802_1P_ENTRIES 8
  845. #define PRIO_QUEUES 4
  846. #define SWITCH_PORT_NUM 2
  847. #define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
  848. #define HOST_MASK (1 << SWITCH_PORT_NUM)
  849. #define PORT_MASK 7
  850. #define MAIN_PORT 0
  851. #define OTHER_PORT 1
  852. #define HOST_PORT SWITCH_PORT_NUM
  853. #define PORT_COUNTER_NUM 0x20
  854. #define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
  855. #define MIB_COUNTER_RX_LO_PRIORITY 0x00
  856. #define MIB_COUNTER_RX_HI_PRIORITY 0x01
  857. #define MIB_COUNTER_RX_UNDERSIZE 0x02
  858. #define MIB_COUNTER_RX_FRAGMENT 0x03
  859. #define MIB_COUNTER_RX_OVERSIZE 0x04
  860. #define MIB_COUNTER_RX_JABBER 0x05
  861. #define MIB_COUNTER_RX_SYMBOL_ERR 0x06
  862. #define MIB_COUNTER_RX_CRC_ERR 0x07
  863. #define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
  864. #define MIB_COUNTER_RX_CTRL_8808 0x09
  865. #define MIB_COUNTER_RX_PAUSE 0x0A
  866. #define MIB_COUNTER_RX_BROADCAST 0x0B
  867. #define MIB_COUNTER_RX_MULTICAST 0x0C
  868. #define MIB_COUNTER_RX_UNICAST 0x0D
  869. #define MIB_COUNTER_RX_OCTET_64 0x0E
  870. #define MIB_COUNTER_RX_OCTET_65_127 0x0F
  871. #define MIB_COUNTER_RX_OCTET_128_255 0x10
  872. #define MIB_COUNTER_RX_OCTET_256_511 0x11
  873. #define MIB_COUNTER_RX_OCTET_512_1023 0x12
  874. #define MIB_COUNTER_RX_OCTET_1024_1522 0x13
  875. #define MIB_COUNTER_TX_LO_PRIORITY 0x14
  876. #define MIB_COUNTER_TX_HI_PRIORITY 0x15
  877. #define MIB_COUNTER_TX_LATE_COLLISION 0x16
  878. #define MIB_COUNTER_TX_PAUSE 0x17
  879. #define MIB_COUNTER_TX_BROADCAST 0x18
  880. #define MIB_COUNTER_TX_MULTICAST 0x19
  881. #define MIB_COUNTER_TX_UNICAST 0x1A
  882. #define MIB_COUNTER_TX_DEFERRED 0x1B
  883. #define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
  884. #define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
  885. #define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
  886. #define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
  887. #define MIB_COUNTER_RX_DROPPED_PACKET 0x20
  888. #define MIB_COUNTER_TX_DROPPED_PACKET 0x21
/**
 * struct ksz_port_mib - Port MIB data structure
 * @cnt_ptr: Current pointer to MIB counter index.
 * @link_down: Indication the link has just gone down.
 * @state: Connection status of the port.
 * @mib_start: The starting counter index. Some ports do not start at 0.
 * @counter: 64-bit MIB counter value.
 * @dropped: Temporary buffer to remember last read packet dropped values.
 *
 * MIB counters need to be read periodically so that counters do not get
 * overflowed and give incorrect values. A right balance is needed to
 * satisfy this condition and not waste too much CPU time.
 *
 * It is pointless to read MIB counters when the port is disconnected. The
 * @state provides the connection status so that MIB counters are read only
 * when the port is connected. The @link_down indicates the port is just
 * disconnected so that all MIB counters are read one last time to update the
 * information.
 */
struct ksz_port_mib {
	u8 cnt_ptr;
	u8 link_down;
	u8 state;
	u8 mib_start;

	u64 counter[TOTAL_PORT_COUNTER_NUM];
	u32 dropped[2];
};
/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid: VID value.
 * @member: Port membership.
 * @port_prio: Port priority.
 * @rx_rate: Receive priority rate.
 * @tx_rate: Transmit priority rate.
 * @stp_state: Current Spanning Tree Protocol state.
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};
/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table: MAC table entries information.
 * @vlan_table: VLAN table entries information.
 * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
 *	(bit7 ~ bit2) field.
 * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
 *	Tag priority field.
 * @br_addr: Bridge address. Used for STP.
 * @other_addr: Other MAC address. Used for multiple network device mode.
 * @broad_per: Broadcast storm percentage.
 * @member: Current port membership. Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];

	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];

	u8 br_addr[ETH_ALEN];
	u8 other_addr[ETH_ALEN];

	u8 broad_per;
	u8 member;
};
  958. #define TX_RATE_UNIT 10000
/**
 * struct ksz_port_info - Port information data structure
 * @state: Connection status of the port.
 * @tx_rate: Transmit rate divided by 10000 to get Mbit.
 * @duplex: Duplex mode.
 * @advertised: Advertised auto-negotiation setting. Used to determine link.
 * @partner: Auto-negotiation partner setting. Used to determine link.
 * @port_id: Port index to access actual hardware register.
 * @pdev: Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};
/* Upper bound for ksz_hw.tx_size (transmit interrupt optimization). */
#define MAX_TX_HELD_SIZE		52000

/* Hardware features and bug fixes (ksz_hw.features bits). */
#define LINK_INT_WORKING		(1 << 0)
#define SMALL_PACKET_TX_BUG		(1 << 1)
#define HALF_DUPLEX_SIGNAL_BUG		(1 << 2)
#define RX_HUGE_FRAME			(1 << 4)
#define STP_SUPPORT			(1 << 8)

/* Software overrides (ksz_hw.overrides bits). */
#define PAUSE_FLOW_CTRL			(1 << 0)
#define FAST_AGING			(1 << 1)
/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io: Virtual address assigned.
 * @ksz_switch: Pointer to KSZ8842 switch.
 * @port_info: Port information.
 * @port_mib: Port MIB information.
 * @dev_count: Number of network devices this hardware supports.
 * @dst_ports: Destination ports in switch for transmission.
 * @id: Hardware ID.  Used for display only.
 * @mib_cnt: Number of MIB counters this hardware has.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @tx_cfg: Cached transmit control settings.
 * @rx_cfg: Cached receive control settings.
 * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
 * @intr_blocked: Interrupt blocked.
 * @rx_desc_info: Receive descriptor information.
 * @tx_desc_info: Transmit descriptor information.
 * @tx_int_cnt: Transmit interrupt count.  Used for TX optimization.
 * @tx_int_mask: Transmit interrupt mask.  Used for TX optimization.
 * @tx_size: Transmit data size.  Used for TX optimization.
 * The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr: Permanent MAC address.
 * @override_addr: Overridden MAC address.
 * @address: Additional MAC address entries.
 * @addr_list_size: Additional MAC address list size.
 * @mac_override: Indication of MAC address overridden.
 * @promiscuous: Counter to keep track of promiscuous mode set.
 * @all_multi: Counter to keep track of all multicast mode set.
 * @multi_list: Multicast address entries.
 * @multi_bits: Cached multicast hash table settings.
 * @multi_list_size: Multicast address list size.
 * @enabled: Indication of hardware enabled.
 * @rx_stop: Indication of receive process stop.
 * @reserved2: Reserved padding.
 * @features: Hardware features to enable.
 * @overrides: Hardware features to override.
 * @parent: Pointer to parent, network device private structure.
 */
struct ksz_hw {
	void __iomem *io;
	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;
	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;
	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;
	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;
	u8 perm_addr[ETH_ALEN];
	u8 override_addr[ETH_ALEN];
	u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;
	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];
	uint features;
	uint overrides;
	void *parent;
};
/* Flow control modes used by ksz_port.flow_ctrl. */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
/**
 * struct ksz_port - Virtual port data structure
 * @duplex: Duplex mode setting.  1 for half duplex, 2 for full
 * duplex, and 0 for auto, which normally results in full
 * duplex.
 * @speed: Speed setting.  10 for 10 Mbit, 100 for 100 Mbit, and
 * 0 for auto, which normally results in 100 Mbit.
 * @force_link: Force link setting.  0 for auto-negotiation, and 1 for
 * force.
 * @flow_ctrl: Flow control setting.  PHY_NO_FLOW_CTRL for no flow
 * control, and PHY_FLOW_CTRL for flow control.
 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
 * Mbit PHY.
 * @first_port: Index of first port this port supports.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @port_cnt: Number of ports this port supports.
 * @counter: Port statistics counter.
 * @hw: Pointer to hardware structure.
 * @linked: Pointer to port information linked to this port.
 */
struct ksz_port {
	u8 duplex;
	u8 speed;
	u8 force_link;
	u8 flow_ctrl;
	int first_port;
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];
	struct ksz_hw *hw;
	struct ksz_port_info *linked;
};
/**
 * struct ksz_timer_info - Timer information data structure
 * @timer: Kernel timer.
 * @cnt: Running timer counter.
 * @max: Number of times to run timer; -1 for infinity.
 * @period: Timer period in jiffies.
 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;
	int max;
	int period;
};
/**
 * struct ksz_shared_mem - OS dependent shared memory data structure
 * @dma_addr: Physical DMA address allocated.
 * @alloc_size: Allocation size.
 * @phys: Actual physical address used (may be aligned up from @dma_addr).
 * @alloc_virt: Virtual address allocated.
 * @virt: Actual virtual address used.
 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;
	uint alloc_size;
	uint phys;
	u8 *alloc_virt;
	u8 *virt;
};
/**
 * struct ksz_counter_info - OS dependent counter information data structure
 * @counter: Wait queue to wake up after counters are read.
 * @time: Next time in jiffies to read counter.
 * @read: Indication of counters read in full or not.
 */
struct ksz_counter_info {
	wait_queue_head_t counter;
	unsigned long time;
	int read;
};
/**
 * struct dev_info - Network device information data structure
 * @dev: Pointer to network device.
 * @pdev: Pointer to PCI device.
 * @hw: Hardware structure.
 * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock protecting hardware register access.
 * @lock: Mutex serializing device access.
 * @dev_rcv: Receive process function used.
 * @last_skb: Socket buffer allocated for descriptor rx fragments.
 * @skb_index: Buffer index for receiving fragments.
 * @skb_len: Buffer length for receiving fragments.
 * @mib_read: Workqueue to read MIB counters.
 * @mib_timer_info: Timer to read MIB counters.
 * @counter: Used for MIB reading.
 * @mtu: Current MTU used.  The default is REGULAR_RX_BUF_SIZE;
 * the maximum is MAX_RX_BUF_SIZE.
 * @opened: Counter to keep track of device open.
 * @rx_tasklet: Receive processing tasklet.
 * @tx_tasklet: Transmit processing tasklet.
 * @wol_enable: Wake-on-LAN enable set by ethtool.
 * @wol_support: Wake-on-LAN support used by ethtool.
 * @pme_wait: Used for KSZ8841 power management.
 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;
	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;
	spinlock_t hwlock;
	struct mutex lock;
	int (*dev_rcv)(struct dev_info *);
	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;
	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];
	int mtu;
	int opened;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int wol_enable;
	int wol_support;
	unsigned long pme_wait;
};
/**
 * struct dev_priv - Network device private data structure
 * @adapter: Adapter device information.
 * @port: Port information.
 * @monitor_timer_info: Timer to monitor ports.
 * @proc_sem: Semaphore for proc access.
 * @id: Device ID.
 * @mii_if: MII interface information.
 * @advertising: Temporary variable to store advertised settings.
 * @msg_enable: The message flags controlling driver output.
 * @media_state: The connection status of the device.
 * @multicast: The all multicast state of the device.
 * @promiscuous: The promiscuous state of the device.
 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;
	struct semaphore proc_sem;
	int id;
	struct mii_if_info mii_if;
	u32 advertising;
	u32 msg_enable;
	int media_state;
	int multicast;
	int promiscuous;
};
#define DRV_NAME		"KSZ884X PCI"
#define DEVICE_NAME		"KSZ884x PCI"
#define DRV_VERSION		"1.0.0"
#define DRV_RELDATE		"Feb 8, 2010"

/* Driver version banner. */
static char version[] =
	"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";

/* Default MAC address.  NOTE(review): presumably used when no address is
 * programmed in the hardware - confirm at the use site.
 */
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };

/*
 * Interrupt processing primary routines
 */
/* Acknowledge @interrupt by writing the bits back to the status register. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}
/*
 * Disable all hardware interrupts.  The current mask is remembered in
 * @intr_blocked, the enable register is cleared, and @intr_set is refreshed
 * from the hardware (the read also confirms the write reached the device).
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Write a new interrupt enable value to the hardware and cache it. */
static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
{
	hw->intr_set = interrupt;
	writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Unblock interrupts and re-enable the current interrupt mask. */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}
/* Remove @bit from the cached interrupt mask; the hardware is not touched. */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
/*
 * Disable @interrupt in the hardware enable register and remove it from the
 * cached mask so a later hw_ena_intr() will not re-enable it.
 */
static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr & ~interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw_dis_intr_bit(hw, interrupt);
}
/**
 * hw_turn_on_intr - turn on specified interrupts
 * @hw: The hardware instance.
 * @bit: The interrupt bits to be on.
 *
 * This routine turns on the specified interrupts in the interrupt mask so that
 * those interrupts will be enabled.  The hardware register is only updated
 * when interrupts are not currently blocked.
 */
static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
{
	hw->intr_mask |= bit;

	if (!hw->intr_blocked)
		hw_set_intr(hw, hw->intr_mask);
}
/*
 * Enable @interrupt in the hardware enable register.  The cached enable
 * value @intr_set is updated; the blocked mask @intr_mask is not.
 */
static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr | interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
}
  1277. static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
  1278. {
  1279. *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
  1280. *status = *status & hw->intr_set;
  1281. }
/*
 * Re-enable interrupts if @interrupt (the value saved by hw_block_intr) is
 * non-zero; a zero value means interrupts were already blocked before.
 */
static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
{
	if (interrupt)
		hw_ena_intr(hw);
}
/**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
 * This function blocks all interrupts of the hardware and returns the current
 * interrupt enable mask so that interrupts can be restored later.
 *
 * Return the current interrupt enable mask, or 0 when interrupts were
 * already blocked (so nested callers do not re-enable them early).
 */
static uint hw_block_intr(struct ksz_hw *hw)
{
	uint interrupt = 0;

	if (!hw->intr_blocked) {
		hw_dis_intr(hw);
		interrupt = hw->intr_blocked;
	}
	return interrupt;
}
/*
 * Hardware descriptor routines
 */

/* Write @status to the descriptor with the hw_owned bit cleared, handing
 * the descriptor to software.
 */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}
/*
 * Hand a transmit descriptor to the hardware.  The buffer word is only
 * rewritten when it changed; the control word (with hw_owned set) is written
 * last so the hardware sees a fully initialized descriptor.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
/*
 * Take the next receive descriptor from the ring (the index wraps via the
 * ring mask) and clear its receive status bits for reuse.
 */
static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
{
	*desc = &info->ring[info->last];
	info->last++;
	info->last &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
}
/* Program the DMA buffer address of a receive descriptor. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the buffer size field of a receive descriptor (software copy only). */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}
/*
 * Take the next transmit descriptor from the ring (the index wraps via the
 * ring mask) and clear its transmit status bits for reuse.
 */
static inline void get_tx_pkt(struct ksz_desc_info *info,
	struct ksz_desc **desc)
{
	*desc = &info->ring[info->next];
	info->next++;
	info->next &= info->mask;
	info->avail--;
	(*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
}
/* Program the DMA buffer address of a transmit descriptor. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the buffer size field of a transmit descriptor (software copy only). */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
/* Switch functions */

/* Indirect access control: read command bit and table selector position. */
#define TABLE_READ			0x10
#define TABLE_SEL_SHIFT			2

/* Dummy register read used as a short delay after an indirect-access write. */
#define HW_DELAY(hw, reg)			\
	do {					\
		readw(hw->io + reg);		\
	} while (0)
/**
 * sw_r_table - read 4 bytes of data from switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data: Buffer to store the read data.
 *
 * This routine reads 4 bytes of data from the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
{
	u16 ctrl_addr;
	uint interrupt;

	/* Build the indirect access command: table select + read + entry. */
	ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;

	interrupt = hw_block_intr(hw);

	/* Issue the command, let it settle, then fetch the result. */
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	*data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_table_64 - write 8 bytes of data to the switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data_hi: The high part of data to be written (bit63 ~ bit32).
 * @data_lo: The low part of data to be written (bit31 ~ bit0).
 *
 * This routine writes 8 bytes of data to the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of written data.
 */
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
	u32 data_lo)
{
	u16 ctrl_addr;
	uint interrupt;

	ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;

	interrupt = hw_block_intr(hw);

	/* Load both data registers before issuing the write command. */
	writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
	writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);

	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	hw_restore_intr(hw, interrupt);
}
  1407. /**
  1408. * sw_w_sta_mac_table - write to the static MAC table
  1409. * @hw: The hardware instance.
  1410. * @addr: The address of the table entry.
  1411. * @mac_addr: The MAC address.
  1412. * @ports: The port members.
  1413. * @override: The flag to override the port receive/transmit settings.
  1414. * @valid: The flag to indicate entry is valid.
  1415. * @use_fid: The flag to indicate the FID is valid.
  1416. * @fid: The FID value.
  1417. *
  1418. * This routine writes an entry of the static MAC table of the switch. It
  1419. * calls sw_w_table_64() to write the data.
  1420. */
  1421. static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
  1422. u8 ports, int override, int valid, int use_fid, u8 fid)
  1423. {
  1424. u32 data_hi;
  1425. u32 data_lo;
  1426. data_lo = ((u32) mac_addr[2] << 24) |
  1427. ((u32) mac_addr[3] << 16) |
  1428. ((u32) mac_addr[4] << 8) | mac_addr[5];
  1429. data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
  1430. data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
  1431. if (override)
  1432. data_hi |= STATIC_MAC_TABLE_OVERRIDE;
  1433. if (use_fid) {
  1434. data_hi |= STATIC_MAC_TABLE_USE_FID;
  1435. data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
  1436. }
  1437. if (valid)
  1438. data_hi |= STATIC_MAC_TABLE_VALID;
  1439. sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
  1440. }
  1441. /**
  1442. * sw_r_vlan_table - read from the VLAN table
  1443. * @hw: The hardware instance.
  1444. * @addr: The address of the table entry.
  1445. * @vid: Buffer to store the VID.
  1446. * @fid: Buffer to store the VID.
  1447. * @member: Buffer to store the port membership.
  1448. *
  1449. * This function reads an entry of the VLAN table of the switch. It calls
  1450. * sw_r_table() to get the data.
  1451. *
  1452. * Return 0 if the entry is valid; otherwise -1.
  1453. */
  1454. static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
  1455. u8 *member)
  1456. {
  1457. u32 data;
  1458. sw_r_table(hw, TABLE_VLAN, addr, &data);
  1459. if (data & VLAN_TABLE_VALID) {
  1460. *vid = (u16)(data & VLAN_TABLE_VID);
  1461. *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
  1462. *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
  1463. VLAN_TABLE_MEMBERSHIP_SHIFT);
  1464. return 0;
  1465. }
  1466. return -1;
  1467. }
/**
 * port_r_mib_cnt - read MIB counter
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the counter.
 * @cnt: Buffer the counter value is accumulated into.
 *
 * This routine reads a MIB counter of the port.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
{
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int timeout;

	ctrl_addr = addr + PORT_COUNTER_NUM * port;

	interrupt = hw_block_intr(hw);

	ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	/* Bounded poll until the hardware marks the counter value valid. */
	for (timeout = 100; timeout > 0; timeout--) {
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		if (data & MIB_COUNTER_VALID) {
			/* Hardware counter wrapped since the last read. */
			if (data & MIB_COUNTER_OVERFLOW)
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}

	hw_restore_intr(hw, interrupt);
}
/**
 * port_r_mib_pkt - read dropped packet counts
 * @hw: The hardware instance.
 * @port: The port index.
 * @last: Buffer holding the previously read raw counter values (rx, tx).
 * @cnt: Buffer to store the receive and transmit dropped packet counts.
 *
 * This routine reads the dropped packet counts of the port.  The hardware
 * counters are free-running, so the delta from the last read (with wrap
 * correction) is accumulated into @cnt.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
{
	u32 cur;
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int index;

	/* First pass reads the RX dropped counter, second pass the TX one. */
	index = KS_MIB_PACKET_DROPPED_RX_0 + port;
	do {
		interrupt = hw_block_intr(hw);

		ctrl_addr = (u16) index;
		ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
			<< 8);
		writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
		HW_DELAY(hw, KS884X_IACR_OFFSET);
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		hw_restore_intr(hw, interrupt);

		data &= MIB_PACKET_DROPPED;
		cur = *last;
		if (data != cur) {
			*last = data;

			/* Correct for counter wrap-around. */
			if (data < cur)
				data += MIB_PACKET_DROPPED + 1;
			data -= cur;
			*cnt += data;
		}
		++last;
		++cnt;
		index -= KS_MIB_PACKET_DROPPED_TX -
			KS_MIB_PACKET_DROPPED_TX_0 + 1;
	} while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
}
/**
 * port_r_cnt - read MIB counters periodically
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to read the counters of the port periodically to avoid
 * counter overflow.  The hardware should be acquired first before calling this
 * routine.
 *
 * Return 0 after all counters have been read.
 */
static int port_r_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	/* Accumulate the remaining regular MIB counters. */
	if (mib->mib_start < PORT_COUNTER_NUM)
		while (mib->cnt_ptr < PORT_COUNTER_NUM) {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		}

	/* Dropped packet counters live outside the regular MIB range. */
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	mib->cnt_ptr = 0;
	return 0;
}
/**
 * port_init_cnt - initialize MIB counter values
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to initialize all counters to zero if the hardware
 * cannot do it after reset.  It reads every counter once (flushing any
 * residual hardware values into the software copies) and then clears the
 * software copies.
 */
static void port_init_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	mib->cnt_ptr = 0;
	if (mib->mib_start < PORT_COUNTER_NUM)
		do {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		} while (mib->cnt_ptr < PORT_COUNTER_NUM);
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);

	/* Discard the flushed values; counting starts from zero. */
	memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	mib->cnt_ptr = 0;
}
  1592. /*
  1593. * Port functions
  1594. */
/**
 * port_chk - check port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to check.
 *
 * This function checks whether all of the specified bits of the port register
 * are set.
 *
 * Return 0 if the bits are not set.
 */
static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
{
	u32 addr;
	u16 data;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	data = readw(hw->io + addr);
	return (data & bits) == bits;
}
/**
 * port_cfg - set port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to set.
 * @set: The flag indicating whether the bits are to be set or not.
 *
 * This routine sets or resets the specified bits of the port register
 * using a read-modify-write cycle.
 */
static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
	int set)
{
	u32 addr;
	u16 data;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	data = readw(hw->io + addr);
	if (set)
		data |= bits;
	else
		data &= ~bits;
	writew(data, hw->io + addr);
}
  1640. /**
  1641. * port_chk_shift - check port bit
  1642. * @hw: The hardware instance.
  1643. * @port: The port index.
  1644. * @addr: The offset of the register.
  1645. * @shift: Number of bits to shift.
  1646. *
  1647. * This function checks whether the specified port is set in the register or
  1648. * not.
  1649. *
  1650. * Return 0 if the port is not set.
  1651. */
  1652. static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
  1653. {
  1654. u16 data;
  1655. u16 bit = 1 << port;
  1656. data = readw(hw->io + addr);
  1657. data >>= shift;
  1658. return (data & bit) == bit;
  1659. }
/**
 * port_cfg_shift - set port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The offset of the register.
 * @shift: Number of bits to shift.
 * @set: The flag indicating whether the port is to be set or not.
 *
 * This routine sets or resets the specified port bit in the register using a
 * read-modify-write cycle.
 */
static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
	int set)
{
	u16 data;
	u16 bits = 1 << port;

	data = readw(hw->io + addr);
	bits <<= shift;
	if (set)
		data |= bits;
	else
		data &= ~bits;
	writew(data, hw->io + addr);
}
/**
 * port_r8 - read byte from port register
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a byte from the port register.
 */
static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readb(hw->io + addr);
}
/**
 * port_r16 - read word from port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a word from the port register.
 */
static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readw(hw->io + addr);
}
/**
 * port_w16 - write word to port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Data to write.
 *
 * This routine writes a word to the port register.
 */
static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	writew(data, hw->io + addr);
}
  1731. /**
  1732. * sw_chk - check switch register bits
  1733. * @hw: The hardware instance.
  1734. * @addr: The address of the switch register.
  1735. * @bits: The data bits to check.
  1736. *
  1737. * This function checks whether the specified bits of the switch register are
  1738. * set or not.
  1739. *
  1740. * Return 0 if the bits are not set.
  1741. */
  1742. static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
  1743. {
  1744. u16 data;
  1745. data = readw(hw->io + addr);
  1746. return (data & bits) == bits;
  1747. }
/**
 * sw_cfg - set switch register bits
 * @hw: The hardware instance.
 * @addr: The address of the switch register.
 * @bits: The data bits to set.
 * @set: The flag indicating whether the bits are to be set or not.
 *
 * This function sets or resets the specified bits of the switch register
 * using a read-modify-write cycle.
 */
static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
{
	u16 data;

	data = readw(hw->io + addr);
	if (set)
		data |= bits;
	else
		data &= ~bits;
	writew(data, hw->io + addr);
}
/* Bandwidth */

/* Enable or disable broadcast storm protection on port @p. */
static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}
/* Check whether broadcast storm protection is enabled on port @p. */
static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
}
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROTECTION_RATE	10

/* 148,800 frames * 67 ms / 100 */
#define BROADCAST_STORM_VALUE		9969

/**
 * sw_cfg_broad_storm - configure broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine configures the broadcast storm threshold of the switch.
 */
static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	u16 data;
	u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);

	/* Clamp to the maximum rate the hardware field can hold. */
	if (value > BROADCAST_STORM_RATE)
		value = BROADCAST_STORM_RATE;

	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
	/* Rate is stored byte-swapped across the LO/HI register fields. */
	data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
	writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
}
/**
 * sw_get_broad_storm - get broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Buffer to store the broadcast storm threshold percentage.
 *
 * This routine retrieves the broadcast storm threshold of the switch,
 * converting the raw byte-swapped register value back to a rounded percent.
 */
static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
{
	int num;
	u16 data;

	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	num = (data & BROADCAST_STORM_RATE_HI);
	num <<= 8;
	num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
	num = DIV_ROUND_CLOSEST(num * 100, BROADCAST_STORM_VALUE);
	*percent = (u8) num;
}
/**
 * sw_dis_broad_storm - disable broadcast storm protection
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the broadcast storm limit function of the switch.
 */
static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
{
	port_cfg_broad_storm(hw, port, 0);
}
/**
 * sw_ena_broad_storm - enable broadcast storm protection
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine enables the broadcast storm limit function of the switch,
 * re-applying the configured percentage first.
 */
static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
{
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	port_cfg_broad_storm(hw, port, 1);
}
/**
 * sw_init_broad_storm - initialize broadcast storm protection
 * @hw: The hardware instance.
 *
 * This routine initializes the broadcast storm limit function of the switch:
 * threshold defaults to 1%, per-port protection starts disabled, and
 * multicast frames are excluded from storm protection.
 */
static void sw_init_broad_storm(struct ksz_hw *hw)
{
	int port;

	hw->ksz_switch->broad_per = 1;
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	for (port = 0; port < TOTAL_PORT_NUM; port++)
		sw_dis_broad_storm(hw, port);
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
}
/**
 * hw_cfg_broad_storm - configure broadcast storm
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine configures the broadcast storm threshold of the switch.
 * It is called by user functions. The hardware should be acquired first.
 */
static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	if (percent > 100)
		percent = 100;

	sw_cfg_broad_storm(hw, percent);
	/*
	 * Read the value back so the cached copy reflects the percentage the
	 * hardware actually applies after rounding.
	 */
	sw_get_broad_storm(hw, &percent);
	hw->ksz_switch->broad_per = percent;
}
/**
 * sw_dis_prio_rate - disable switch priority rate
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the priority rate function of the switch.
 */
static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_IN_RATE_OFFSET;
	/* Writing zero clears all rate limits for the port. */
	writel(0, hw->io + addr);
}

/**
 * sw_init_prio_rate - initialize switch prioirty rate
 * @hw: The hardware instance.
 *
 * This routine initializes the priority rate function of the switch, zeroing
 * the cached per-queue rates and disabling rate limiting on every port.
 */
static void sw_init_prio_rate(struct ksz_hw *hw)
{
	int port;
	int prio;
	struct ksz_switch *sw = hw->ksz_switch;

	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		for (prio = 0; prio < PRIO_QUEUES; prio++) {
			sw->port_cfg[port].rx_rate[prio] =
			sw->port_cfg[port].tx_rate[prio] = 0;
		}
		sw_dis_prio_rate(hw, port);
	}
}
/* Communication */

/* Enable/disable half-duplex back pressure on a port. */
static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}

/* Enable/disable forced flow control on a port. */
static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
}

/* Return non-zero if back pressure is enabled on the port. */
static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
}

/* Return non-zero if forced flow control is enabled on the port. */
static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
}
/* Spanning Tree */

/* Enable/disable packet reception on a port. */
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
}

/* Enable/disable packet transmission on a port. */
static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
}

/* Enable/disable fast aging of the dynamic MAC table. */
static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
}

/*
 * Flush the dynamic MAC table by briefly pulsing fast aging.  Skipped when
 * the FAST_AGING override keeps fast aging permanently enabled.
 */
static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
{
	if (!(hw->overrides & FAST_AGING)) {
		sw_cfg_fast_aging(hw, 1);
		mdelay(1);
		sw_cfg_fast_aging(hw, 0);
	}
}
/* VLAN */

/* Enable/disable VLAN tag insertion on egress. */
static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
}

/* Enable/disable VLAN tag removal on egress. */
static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
}

/* Return non-zero if VLAN tag insertion is enabled on the port. */
static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
}

/* Return non-zero if VLAN tag removal is enabled on the port. */
static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
}

/* Enable/disable discarding of packets whose VID is not in the VLAN table. */
static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
}

/* Enable/disable ingress VLAN filtering on a port. */
static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
}

/* Return non-zero if non-VID packet discarding is enabled on the port. */
static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
}

/* Return non-zero if ingress VLAN filtering is enabled on the port. */
static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
}
/* Mirroring */

/* Mark/unmark a port as the sniffer (monitor) port. */
static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
}

/* Enable/disable mirroring of received packets on a port. */
static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
}

/* Enable/disable mirroring of transmitted packets on a port. */
static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
}

/* Set/clear the global mirror-both-rx-and-tx mode. */
static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
}

/* Initialize port mirroring with everything turned off. */
static void sw_init_mirror(struct ksz_hw *hw)
{
	int port;

	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_cfg_mirror_sniffer(hw, port, 0);
		port_cfg_mirror_rx(hw, port, 0);
		port_cfg_mirror_tx(hw, port, 0);
	}
	sw_cfg_mirror_rx_tx(hw, 0);
}
/* Enable/disable forwarding of unknown unicast packets to the default port. */
static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE, set);
}

/* Return non-zero if unknown-unicast default-port forwarding is enabled. */
static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
{
	return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE);
}

/* Include/exclude a port in the unknown-unicast default port map (bit 0 base). */
static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
{
	port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
}

/* Return non-zero if the port is in the unknown-unicast default port map. */
static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
{
	return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
}
/* Priority */

/* Enable/disable DiffServ (TOS) priority classification on a port. */
static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
}

/* Enable/disable 802.1p priority classification on a port. */
static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
}

/* Enable/disable the user priority ceiling (802.1p re-mapping) on a port. */
static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
}

/* Enable/disable multiple transmit priority queues on a port. */
static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}

/* Return non-zero if DiffServ classification is enabled on the port. */
static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
}

/* Return non-zero if 802.1p classification is enabled on the port. */
static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
}

/* Return non-zero if the user priority ceiling is enabled on the port. */
static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
}

/* Return non-zero if multiple priority queues are enabled on the port. */
static inline int port_chk_prio(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
}
/**
 * sw_dis_diffserv - disable switch DiffServ priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the DiffServ priority function of the switch.
 */
static void sw_dis_diffserv(struct ksz_hw *hw, int port)
{
	port_cfg_diffserv(hw, port, 0);
}

/**
 * sw_dis_802_1p - disable switch 802.1p priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the 802.1p priority function of the switch.
 */
static void sw_dis_802_1p(struct ksz_hw *hw, int port)
{
	port_cfg_802_1p(hw, port, 0);
}
/**
 * sw_cfg_replace_null_vid - configure null VID replacement
 * @hw: The hardware instance.
 * @set: The flag to disable or enable.
 *
 * This routine controls the SWITCH_REPLACE_NULL_VID function of the switch.
 * NOTE(review): presumably this replaces a null (zero) VID in ingress tags
 * with the port default VID -- confirm against the KS8842 datasheet.
 */
static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
}
/**
 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
 * @hw: The hardware instance.
 * @port: The port index.
 * @set: The flag to disable or enable.
 *
 * This routine enables the 802.1p priority re-mapping function of the switch.
 * That allows 802.1p priority field to be replaced with the port's default
 * tag's priority value if the ingress packet's 802.1p priority has a higher
 * priority than port's default tag's priority.
 */
static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
{
	port_cfg_replace_vid(hw, port, set);
}
/**
 * sw_cfg_port_based - configure switch port based priority
 * @hw: The hardware instance.
 * @port: The port index.
 * @prio: The priority to set.
 *
 * This routine configures the port based priority of the switch.  The value
 * is clamped to PORT_BASED_PRIORITY_BASE, cached in the switch state, and
 * written into the port control 1 register priority field.
 */
static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
{
	u16 data;

	if (prio > PORT_BASED_PRIORITY_BASE)
		prio = PORT_BASED_PRIORITY_BASE;

	hw->ksz_switch->port_cfg[port].port_prio = prio;

	port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
	data &= ~PORT_BASED_PRIORITY_MASK;
	data |= prio << PORT_BASED_PRIORITY_SHIFT;
	port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
}
/**
 * sw_dis_multi_queue - disable transmit multiple queues
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the transmit multiple queues selection of the switch
 * port. Only single transmit queue on the port.
 */
static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
{
	port_cfg_prio(hw, port, 0);
}
/**
 * sw_init_prio - initialize switch priority
 * @hw: The hardware instance.
 *
 * This routine initializes the switch QoS priority functions.  The cached
 * 802.1p and DiffServ maps are reset and all QoS features are disabled on
 * every port.
 */
static void sw_init_prio(struct ksz_hw *hw)
{
	int port;
	int tos;
	struct ksz_switch *sw = hw->ksz_switch;

	/*
	 * Init all the 802.1p tag priority value to be assigned to different
	 * priority queue.  Two tag values map to each of the four queues.
	 */
	sw->p_802_1p[0] = 0;
	sw->p_802_1p[1] = 0;
	sw->p_802_1p[2] = 1;
	sw->p_802_1p[3] = 1;
	sw->p_802_1p[4] = 2;
	sw->p_802_1p[5] = 2;
	sw->p_802_1p[6] = 3;
	sw->p_802_1p[7] = 3;

	/*
	 * Init all the DiffServ priority value to be assigned to priority
	 * queue 0.
	 */
	for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
		sw->diffserv[tos] = 0;

	/* All QoS functions disabled. */
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		sw_dis_multi_queue(hw, port);
		sw_dis_diffserv(hw, port);
		sw_dis_802_1p(hw, port);
		sw_cfg_replace_vid(hw, port, 0);

		sw->port_cfg[port].port_prio = 0;
		sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
	}
	sw_cfg_replace_null_vid(hw, 0);
}
/**
 * port_get_def_vid - get port default VID.
 * @hw: The hardware instance.
 * @port: The port index.
 * @vid: Buffer to store the VID.
 *
 * This routine retrieves the default VID of the port.
 */
static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_VID_OFFSET;
	*vid = readw(hw->io + addr);
}
/**
 * sw_init_vlan - initialize switch VLAN
 * @hw: The hardware instance.
 *
 * This routine initializes the VLAN function of the switch.  The hardware
 * VLAN table and per-port default VIDs are read into the cached switch
 * state, and each port's membership is reset to all ports.
 */
static void sw_init_vlan(struct ksz_hw *hw)
{
	int port;
	int entry;
	struct ksz_switch *sw = hw->ksz_switch;

	/* Read 16 VLAN entries from device's VLAN table. */
	for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
		sw_r_vlan_table(hw, entry,
			&sw->vlan_table[entry].vid,
			&sw->vlan_table[entry].fid,
			&sw->vlan_table[entry].member);
	}

	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
		sw->port_cfg[port].member = PORT_MASK;
	}
}
/**
 * sw_cfg_port_base_vlan - configure port-based VLAN membership
 * @hw: The hardware instance.
 * @port: The port index.
 * @member: The port-based VLAN membership.
 *
 * This routine configures the port-based VLAN membership of the port and
 * updates the cached copy in the switch state.
 */
static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
{
	u32 addr;
	u8 data;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_2_OFFSET;

	data = readb(hw->io + addr);
	data &= ~PORT_VLAN_MEMBERSHIP;
	data |= (member & PORT_MASK);
	writeb(data, hw->io + addr);

	hw->ksz_switch->port_cfg[port].member = member;
}
/**
 * sw_get_addr - get the switch MAC address.
 * @hw: The hardware instance.
 * @mac_addr: Buffer to store the MAC address.
 *
 * This function retrieves the MAC address of the switch.  The address bytes
 * live in two interleaved byte registers, so they are read two at a time.
 */
static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < 6; i += 2) {
		mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
		mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
	}
}

/**
 * sw_set_addr - configure switch MAC address
 * @hw: The hardware instance.
 * @mac_addr: The MAC address.
 *
 * This function configures the MAC address of the switch, writing the bytes
 * in the same interleaved layout used by sw_get_addr.
 */
static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < 6; i += 2) {
		writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
		writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
	}
}
/**
 * sw_set_global_ctrl - set switch global control
 * @hw: The hardware instance.
 *
 * This routine sets the global control of the switch function.
 */
static void sw_set_global_ctrl(struct ksz_hw *hw)
{
	u16 data;

	/* Enable switch MII flow control. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data |= SWITCH_FLOW_CTRL;
	writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);

	data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);

	/* Enable aggressive back off algorithm in half duplex mode. */
	data |= SWITCH_AGGR_BACKOFF;

	/* Enable automatic fast aging when link changed detected. */
	data |= SWITCH_AGING_ENABLE;
	data |= SWITCH_LINK_AUTO_AGING;

	/* Fast aging follows the driver override flag. */
	if (hw->overrides & FAST_AGING)
		data |= SWITCH_FAST_AGING;
	else
		data &= ~SWITCH_FAST_AGING;
	writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);

	data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);

	/* Enable no excessive collision drop. */
	data |= NO_EXC_COLLISION_DROP;
	writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
}
/*
 * Spanning tree port states as programmed by port_set_stp_state.
 * See that routine for the rx/tx/learning combination each state selects.
 */
enum {
	STP_STATE_DISABLED = 0,	/* No rx, no tx, no learning. */
	STP_STATE_LISTENING,	/* Rx only, learning disabled. */
	STP_STATE_LEARNING,	/* Rx only, learning enabled. */
	STP_STATE_FORWARDING,	/* Rx and tx, learning enabled. */
	STP_STATE_BLOCKED,	/* No rx, no tx, learning disabled. */
	STP_STATE_SIMPLE	/* Rx and tx, learning disabled. */
};
/**
 * port_set_stp_state - configure port spanning tree state
 * @hw: The hardware instance.
 * @port: The port index.
 * @state: The spanning tree state (one of the STP_STATE_* values).
 *
 * This routine configures the spanning tree state of the port by adjusting
 * the rx/tx enable and learning-disable bits in port control register 2,
 * then caches the state in the switch port configuration.
 */
static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
{
	u16 data;

	port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
	switch (state) {
	case STP_STATE_DISABLED:
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LISTENING:
/*
 * No need to turn on transmit because of port direct mode.
 * Turning on receive is required if static MAC table is not setup.
 */
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LEARNING:
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_FORWARDING:
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_BLOCKED:
/*
 * Need to setup static MAC table with override to keep receiving BPDU
 * messages. See sw_init_stp routine.
 */
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_SIMPLE:
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	}
	/* An unknown state leaves the register bits unchanged. */
	port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
	hw->ksz_switch->port_cfg[port].stp_state = state;
}
/* Fixed slots in the cached static MAC table. */
#define STP_ENTRY 0
#define BROADCAST_ENTRY 1
#define BRIDGE_ADDR_ENTRY 2
#define IPV6_ADDR_ENTRY 3

/**
 * sw_clr_sta_mac_table - clear static MAC table
 * @hw: The hardware instance.
 *
 * This routine clears the static MAC table by writing every cached entry
 * back with its valid bit forced to zero.
 */
static void sw_clr_sta_mac_table(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;

	for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, 0,
			entry->use_fid, entry->fid);
	}
}
/**
 * sw_init_stp - initialize switch spanning tree support
 * @hw: The hardware instance.
 *
 * This routine initializes the spanning tree support of the switch by
 * installing a static MAC entry for 01:80:C2:00:00:00 (the bridge group
 * address used for BPDUs) with override so BPDUs still reach the host
 * port while a port is blocked.
 */
static void sw_init_stp(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;

	entry = &hw->ksz_switch->mac_table[STP_ENTRY];
	entry->mac_addr[0] = 0x01;
	entry->mac_addr[1] = 0x80;
	entry->mac_addr[2] = 0xC2;
	entry->mac_addr[3] = 0x00;
	entry->mac_addr[4] = 0x00;
	entry->mac_addr[5] = 0x00;
	entry->ports = HOST_MASK;
	entry->override = 1;
	entry->valid = 1;
	sw_w_sta_mac_table(hw, STP_ENTRY,
		entry->mac_addr, entry->ports,
		entry->override, entry->valid,
		entry->use_fid, entry->fid);
}
/**
 * sw_block_addr - block certain packets from the host port
 * @hw: The hardware instance.
 *
 * This routine blocks certain packets from reaching to the host port by
 * invalidating the broadcast, bridge address and IPv6 address static MAC
 * table entries.
 */
static void sw_block_addr(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;

	for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		entry->valid = 0;
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, entry->valid,
			entry->use_fid, entry->fid);
	}
}
/* Read the PHY control register for the PHY at offset @phy. */
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Write the PHY control register for the PHY at offset @phy. */
static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Read the PHY status register. */
static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
}

/* Read the PHY auto-negotiation advertisement register. */
static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

/* Write the PHY auto-negotiation advertisement register. */
static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

/* Read the PHY remote (link partner) capability register. */
static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
}

/* Read the crossover control bits (shares the PHY control register). */
static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Write the crossover control bits (shares the PHY control register). */
static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Read the PHY special control/polarity register. */
static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

/* Write the PHY special control/polarity register. */
static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

/* Read the PHY LinkMD (cable diagnostics) register. */
static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}

/* Write the PHY LinkMD (cable diagnostics) register. */
static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
/**
 * hw_r_phy - read data from PHY register
 * @hw: The hardware instance.
 * @port: Port to read.
 * @reg: PHY register to read.
 * @val: Buffer to store the read data.
 *
 * This routine reads data from the PHY register.  The PHY registers are
 * memory mapped, PHY_CTRL_INTERVAL bytes apart per port.
 */
static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
{
	int phy;

	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	*val = readw(hw->io + phy);
}

/**
 * hw_w_phy - write data to PHY register
 * @hw: The hardware instance.
 * @port: Port to write.
 * @reg: PHY register to write.
 * @val: Word data to write.
 *
 * This routine writes data to the PHY register.
 */
static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
{
	int phy;

	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	writew(val, hw->io + phy);
}
/*
 * EEPROM access functions
 *
 * The AT93C46 EEPROM is bit-banged through GPIO bits in the EEPROM control
 * register.
 */
#define AT93C_CODE 0
#define AT93C_WR_OFF 0x00
#define AT93C_WR_ALL 0x10
#define AT93C_ER_ALL 0x20
#define AT93C_WR_ON 0x30

#define AT93C_WRITE 1
#define AT93C_READ 2
#define AT93C_ERASE 3

#define EEPROM_DELAY 4

/* Drive the given EEPROM control GPIO bit(s) low. */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

/* Drive the given EEPROM control GPIO bit(s) high. */
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

/* Return the current level of the given EEPROM control GPIO bit(s). */
static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;

	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}
/* Generate one serial clock pulse on the EEPROM clock line. */
static void eeprom_clk(struct ksz_hw *hw)
{
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}

/* Clock in a 16-bit word from the EEPROM, MSB first. */
static u16 spi_r(struct ksz_hw *hw)
{
	int i;
	u16 temp = 0;

	for (i = 15; i >= 0; i--) {
		raise_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);

		/* Data is sampled while the clock is high. */
		temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;

		drop_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);
	}
	return temp;
}
/* Clock out a 16-bit word to the EEPROM, MSB first. */
static void spi_w(struct ksz_hw *hw, u16 data)
{
	int i;

	for (i = 15; i >= 0; i--) {
		(data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}

/*
 * Send an AT93C command frame: start bit, 2 opcode bits (@data), then a
 * 6-bit register address (@reg), all MSB first.
 */
static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
{
	int i;

	/* Initial start bit */
	raise_gpio(hw, EEPROM_DATA_OUT);
	eeprom_clk(hw);

	/* AT93C operation */
	for (i = 1; i >= 0; i--) {
		(data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}

	/* Address location */
	for (i = 5; i >= 0; i--) {
		(reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}
/* EEPROM word layout. */
#define EEPROM_DATA_RESERVED 0
#define EEPROM_DATA_MAC_ADDR_0 1
#define EEPROM_DATA_MAC_ADDR_1 2
#define EEPROM_DATA_MAC_ADDR_2 3
#define EEPROM_DATA_SUBSYS_ID 4
#define EEPROM_DATA_SUBSYS_VEN_ID 5
#define EEPROM_DATA_PM_CAP 6

/* User defined EEPROM data */
#define EEPROM_DATA_OTHER_MAC_ADDR 9

/**
 * eeprom_read - read from AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 *
 * This function reads a word from the AT93C46 EEPROM.
 *
 * Return the data value.
 */
static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
{
	u16 data;

	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	spi_reg(hw, AT93C_READ, reg);
	data = spi_r(hw);

	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	return data;
}
/**
 * eeprom_write - write to AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 * @data: The data value.
 *
 * This procedure writes a word to the AT93C46 EEPROM.  The sequence is:
 * enable write, erase the word, write the new value, then disable write,
 * polling the busy/ready line (with a bounded timeout) after the erase and
 * the write.
 */
static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
{
	int timeout;

	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);

	/* Enable write. */
	spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Erase the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_ERASE, reg);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Check operation complete. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Write the register. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_WRITE, reg);
	spi_w(hw, data);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Check operation complete. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	timeout = 8;
	mdelay(2);
	do {
		mdelay(1);
	} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
	drop_gpio(hw, EEPROM_CHIP_SELECT);
	udelay(1);

	/* Disable write. */
	raise_gpio(hw, EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);

	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
}
/*
 * Link detection routines
 */

/*
 * Fold the port's configured flow control mode into the auto-negotiation
 * advertisement word @ctrl and return the result.  Only symmetric pause is
 * supported; tx-only/rx-only leave the pause bit cleared.
 */
static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
{
	ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
	switch (port->flow_ctrl) {
	case PHY_FLOW_CTRL:
		ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
		break;
	/* Not supported. */
	case PHY_TX_ONLY:
	case PHY_RX_ONLY:
	default:
		break;
	}
	return ctrl;
}
/*
 * Update the cached DMA rx/tx flow control enables and, when the hardware
 * is running, write only the registers whose configuration actually changed.
 */
static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
{
	u32 rx_cfg;
	u32 tx_cfg;

	rx_cfg = hw->rx_cfg;
	tx_cfg = hw->tx_cfg;
	if (rx)
		hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
	else
		hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
	if (tx)
		hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
	else
		hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
	if (hw->enabled) {
		if (rx_cfg != hw->rx_cfg)
			writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
		if (tx_cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
/*
 * Resolve rx/tx pause usage from the local advertisement (@local) and the
 * link partner ability (@remote) per the IEEE 802.3 pause resolution rules,
 * then apply it to the DMA engine.  Skipped entirely when flow control is
 * overridden, and not applied to the DMA when a switch is present.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;

	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;

	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	if (remote & LPA_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_CAP) {
			rx = tx = 1;
		} else if ((remote & LPA_PAUSE_ASYM) &&
			   (local &
			    (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) ==
			   ADVERTISE_PAUSE_ASYM) {
			/* Partner pauses; we only send pause frames. */
			tx = 1;
		}
	} else if (remote & LPA_PAUSE_ASYM) {
		if ((local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
		    == (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
			/* Partner only sends pause frames; we honor them. */
			rx = 1;
	}
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}
/*
 * Adjust DMA transmit flow control after a link change.  On hardware with
 * the half-duplex signal bug (and no pause override), transmit flow control
 * is disabled while the link is half duplex (info->duplex == 1).
 * NOTE(review): @port and @link_status are currently unused here.
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
	    !(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;

		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
/**
 * port_get_link_speed - get current link status
 * @port: The port instance.
 *
 * This routine reads PHY registers to determine the current link status of the
 * switch ports, updates the cached per-port state (speed, duplex, media
 * state), and re-applies flow control / back pressure when a link comes up.
 */
static void port_get_link_speed(struct ksz_port *port)
{
	uint interrupt;
	struct ksz_port_info *info;
	struct ksz_port_info *linked = NULL;
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 status;
	u8 local;
	u8 remote;
	int i;
	int p;
	int change = 0;

	/* Block interrupts while the per-port registers are sampled. */
	interrupt = hw_block_intr(hw);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		info = &hw->port_info[p];
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/*
		 * Link status is changing all the time even when there is no
		 * cable connection!
		 */
		remote = status & (PORT_AUTO_NEG_COMPLETE |
			PORT_STATUS_LINK_GOOD);
		local = (u8) data;

		/* No change to status. */
		if (local == info->advertised && remote == info->partner)
			continue;

		info->advertised = local;
		info->partner = remote;
		if (status & PORT_STATUS_LINK_GOOD) {

			/* Remember the first linked port. */
			if (!linked)
				linked = info;

			info->tx_rate = 10 * TX_RATE_UNIT;
			if (status & PORT_STATUS_SPEED_100MBIT)
				info->tx_rate = 100 * TX_RATE_UNIT;

			/* Duplex encoding: 1 = half, 2 = full. */
			info->duplex = 1;
			if (status & PORT_STATUS_FULL_DUPLEX)
				info->duplex = 2;

			if (media_connected != info->state) {
				/* Link just came up: fetch negotiation
				 * results and reprogram flow control.
				 */
				hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
					&data);
				hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
					&status);
				determine_flow_ctrl(hw, port, data, status);
				if (hw->ksz_switch) {
					port_cfg_back_pressure(hw, p,
						(1 == info->duplex));
				}
				change |= 1 << i;
				port_cfg_change(hw, port, info, status);
			}
			info->state = media_connected;
		} else {
			if (media_disconnected != info->state) {
				change |= 1 << i;

				/* Indicate the link just goes down. */
				hw->port_mib[p].link_down = 1;
			}
			info->state = media_disconnected;
		}
		hw->port_mib[p].state = (u8) info->state;
	}

	/* NOTE(review): 'change' is accumulated but not consumed in this
	 * function — presumably left over from an older notification path.
	 */

	/* Switch tracking to a live port if the tracked one lost link. */
	if (linked && media_disconnected == port->linked->state)
		port->linked = linked;

	hw_restore_intr(hw, interrupt);
}
  2827. #define PHY_RESET_TIMEOUT 10
/**
 * port_set_link_speed - set port speed
 * @port: The port instance.
 *
 * This routine sets the link speed of the switch ports via auto-negotiation.
 * It builds the full advertisement, trims it according to any user-specified
 * speed/duplex, and restarts auto-negotiation only when the advertisement
 * actually differs from the currently active one on a good link.
 */
static void port_set_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 cfg;
	u8 status;
	int i;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);

		/* Only a good link makes the current setting worth keeping. */
		cfg = 0;
		if (status & PORT_STATUS_LINK_GOOD)
			cfg = data;

		data |= PORT_AUTO_NEG_ENABLE;
		data = advertised_flow_ctrl(port, data);

		/* Start from advertising every speed/duplex combination. */
		data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
			PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;

		/* Check if manual configuration is specified by the user. */
		if (port->speed || port->duplex) {
			if (10 == port->speed)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_100BTX);
			else if (100 == port->speed)
				data &= ~(PORT_AUTO_NEG_10BT_FD |
					PORT_AUTO_NEG_10BT);

			/* duplex: 1 = half, 2 = full. */
			if (1 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_10BT_FD);
			else if (2 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX |
					PORT_AUTO_NEG_10BT);
		}

		/* Restart negotiation only if the advertisement changed. */
		if (data != cfg) {
			data |= PORT_AUTO_NEG_RESTART;
			port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
		}
	}
}
/**
 * port_force_link_speed - force port speed
 * @port: The port instance.
 *
 * This routine forces the link speed of the switch ports by disabling
 * auto-negotiation in the PHY control register and setting the speed and
 * duplex bits directly from the user-requested values.
 */
static void port_force_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	int i;
	int phy;
	int p;

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/* Each port's PHY control register is PHY_CTRL_INTERVAL apart. */
		phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
		hw_r_phy_ctrl(hw, phy, &data);

		data &= ~BMCR_ANENABLE;

		if (10 == port->speed)
			data &= ~BMCR_SPEED100;
		else if (100 == port->speed)
			data |= BMCR_SPEED100;

		/* duplex: 1 = half, 2 = full. */
		if (1 == port->duplex)
			data &= ~BMCR_FULLDPLX;
		else if (2 == port->duplex)
			data |= BMCR_FULLDPLX;

		hw_w_phy_ctrl(hw, phy, data);
	}
}
  2901. static void port_set_power_saving(struct ksz_port *port, int enable)
  2902. {
  2903. struct ksz_hw *hw = port->hw;
  2904. int i;
  2905. int p;
  2906. for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
  2907. port_cfg(hw, p,
  2908. KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
  2909. }
  2910. /*
  2911. * KSZ8841 power management functions
  2912. */
/**
 * hw_chk_wol_pme_status - check PMEN pin
 * @hw: The hardware instance.
 *
 * This function is used to check PMEN pin is asserted, by reading the
 * PME_Status bit from the PCI power-management control register.
 *
 * Return 1 if PMEN pin is asserted; otherwise, 0.
 */
static int hw_chk_wol_pme_status(struct ksz_hw *hw)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	/* No PCI power-management capability: nothing to check. */
	if (!pdev->pm_cap)
		return 0;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
}
/**
 * hw_clr_wol_pme_status - clear PMEN pin
 * @hw: The hardware instance.
 *
 * This routine is used to clear PME_Status to deassert PMEN pin.
 * PME_Status is write-1-to-clear, hence the OR before the write-back.
 */
static void hw_clr_wol_pme_status(struct ksz_hw *hw)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	if (!pdev->pm_cap)
		return;

	/* Clear PME_Status to deassert PMEN pin. */
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data |= PCI_PM_CTRL_PME_STATUS;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
/**
 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
 * @hw: The hardware instance.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable Wake-on-LAN.  When enabling it
 * also selects the D3hot power state together with PME_En; when disabling
 * the power state field is left cleared (D0).
 */
static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;

	if (!pdev->pm_cap)
		return;

	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data &= ~PCI_PM_CTRL_STATE_MASK;
	if (set)
		data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
	else
		data &= ~PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
  2971. /**
  2972. * hw_cfg_wol - configure Wake-on-LAN features
  2973. * @hw: The hardware instance.
  2974. * @frame: The pattern frame bit.
  2975. * @set: The flag indicating whether to enable or disable.
  2976. *
  2977. * This routine is used to enable or disable certain Wake-on-LAN features.
  2978. */
  2979. static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
  2980. {
  2981. u16 data;
  2982. data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
  2983. if (set)
  2984. data |= frame;
  2985. else
  2986. data &= ~frame;
  2987. writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
  2988. }
/**
 * hw_set_wol_frame - program Wake-on-LAN pattern
 * @hw: The hardware instance.
 * @i: The frame index (0-3; each frame occupies 0x10 bytes of registers).
 * @mask_size: The size of the mask in bytes (each mask bit covers one
 *	pattern byte).
 * @mask: Mask to ignore certain bytes in the pattern.
 * @frame_size: The size of the frame.
 * @pattern: The frame data.
 *
 * This routine is used to program Wake-on-LAN pattern.  It writes the byte
 * mask to the frame registers, gathers the pattern bytes selected by the
 * mask, and programs the CRC of those bytes so the hardware can match the
 * incoming frame.
 */
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
	const u8 *mask, uint frame_size, const u8 *pattern)
{
	int bits;
	int from;
	int len;
	int to;
	u32 crc;
	u8 data[64];
	u8 val = 0;

	/* The mask can only cover mask_size * 8 bytes, 64 bytes at most. */
	if (frame_size > mask_size * 8)
		frame_size = mask_size * 8;
	if (frame_size > 64)
		frame_size = 64;

	/* Each frame's register block is 0x10 bytes apart. */
	i *= 0x10;
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);

	/*
	 * Walk the pattern one mask byte at a time: 'from' indexes the
	 * pattern, 'to' the gathered bytes in data[], 'len' the mask byte,
	 * 'bits' the remaining bits in the current mask byte held in 'val'.
	 */
	bits = len = from = to = 0;
	do {
		if (bits) {
			/* A set mask bit selects this pattern byte. */
			if ((val & 1))
				data[to++] = pattern[from];
			val >>= 1;
			++from;
			--bits;
		} else {
			/* Load the next mask byte and program it. */
			val = mask[len];
			writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
				+ len);
			++len;
			if (val)
				bits = 8;
			else
				/* All-zero mask byte: skip 8 pattern bytes. */
				from += 8;
		}
	} while (from < (int) frame_size);

	/*
	 * Clear the mask bits beyond the end of the pattern so the hardware
	 * does not match on bytes that were never part of the CRC.
	 */
	if (val) {
		bits = mask[len - 1];
		val <<= (from % 8);
		bits &= ~val;
		writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
			1);
	}

	/* CRC over the gathered (mask-selected) bytes. */
	crc = ether_crc(to, data);
	writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
}
  3046. /**
  3047. * hw_add_wol_arp - add ARP pattern
  3048. * @hw: The hardware instance.
  3049. * @ip_addr: The IPv4 address assigned to the device.
  3050. *
  3051. * This routine is used to add ARP pattern for waking up the host.
  3052. */
  3053. static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
  3054. {
  3055. static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
  3056. u8 pattern[42] = {
  3057. 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
  3058. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3059. 0x08, 0x06,
  3060. 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
  3061. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3062. 0x00, 0x00, 0x00, 0x00,
  3063. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  3064. 0x00, 0x00, 0x00, 0x00 };
  3065. memcpy(&pattern[38], ip_addr, 4);
  3066. hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
  3067. }
/**
 * hw_add_wol_bcast - add broadcast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add broadcast pattern for waking up the host.
 * It matches only the 6-byte broadcast destination MAC address.
 */
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };	/* first 6 bytes of frame */
	static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}
/**
 * hw_add_wol_mcast - add multicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add multicast pattern for waking up the host.
 *
 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
 * by IPv6 ping command.  The solicited-node address 33:33:FF:xx:xx:xx is
 * built from the low 3 bytes of the device's MAC address.  Note that
 * multicast packets are filtred through the multicast hash table, so not all
 * multicast packets can wake up the host.
 */
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };	/* first 6 bytes of frame */
	u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };

	memcpy(&pattern[3], &hw->override_addr[3], 3);
	hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
}
/**
 * hw_add_wol_ucast - add unicast pattern
 * @hw: The hardware instance.
 *
 * This routine is used to add unicast pattern to wakeup the host.  The
 * pattern is simply the device's own MAC address as the destination.
 *
 * It is assumed the unicast packet is directed to the device, as the hardware
 * can only receive them in normal case.
 */
static void hw_add_wol_ucast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };	/* first 6 bytes of frame */

	hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}
/**
 * hw_enable_wol - enable Wake-on-LAN
 * @hw: The hardware instance.
 * @wol_enable: The Wake-on-LAN settings (WAKE_* flags from ethtool).
 * @net_addr: The IPv4 address assigned to the device.
 *
 * This routine is used to enable Wake-on-LAN depending on driver settings.
 * The frame patterns are always (re)programmed; each frame-match enable bit
 * is set only when the corresponding WAKE_* flag is requested.
 */
static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
{
	hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
	hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
	hw_add_wol_ucast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
	hw_add_wol_mcast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
	hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
	hw_add_wol_arp(hw, net_addr);
}
/**
 * hw_init - check driver is correct for the hardware
 * @hw: The hardware instance.
 *
 * This function checks the hardware is correct for this driver and sets the
 * hardware up for proper initialization.
 *
 * Return number of ports (1 for KSZ8841, 2 for KSZ8842) or 0 if not right.
 */
static int hw_init(struct ksz_hw *hw)
{
	int rc = 0;
	u16 data;
	u16 revision;

	/* Set bus speed to 125MHz. */
	writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);

	/* Check KSZ884x chip ID. */
	data = readw(hw->io + KS884X_CHIP_ID_OFFSET);

	revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
	data &= KS884X_CHIP_ID_MASK_41;
	if (REG_CHIP_ID_41 == data)
		rc = 1;
	else if (REG_CHIP_ID_42 == data)
		rc = 2;
	else
		return 0;

	/* Setup hardware features or bug workarounds. */
	if (revision <= 1) {
		hw->features |= SMALL_PACKET_TX_BUG;
		/* The half-duplex bug only affects the KSZ8841. */
		if (1 == rc)
			hw->features |= HALF_DUPLEX_SIGNAL_BUG;
	}
	return rc;
}
/**
 * hw_reset - reset the hardware
 * @hw: The hardware instance.
 *
 * This routine resets the hardware by asserting the global software reset
 * bit, waiting 10 ms, and then deasserting it.
 */
static void hw_reset(struct ksz_hw *hw)
{
	writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);

	/* Wait for device to reset. */
	mdelay(10);

	/* Write 0 to clear device reset. */
	writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
}
/**
 * hw_setup - setup the hardware
 * @hw: The hardware instance.
 *
 * This routine setup the hardware for proper operation: optional LED mode,
 * then the cached transmit and receive DMA control values (the registers
 * themselves are written later when the DMA channels are started).
 */
static void hw_setup(struct ksz_hw *hw)
{
#if SET_DEFAULT_LED
	u16 data;

	/* Change default LED mode. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
	data &= ~LED_MODE;
	data |= SET_DEFAULT_LED;
	writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
#endif

	/* Setup transmit control. */
	hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);

	/* Setup receive control. */
	hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
	hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;

	/* Hardware cannot handle UDP packet in IP fragments. */
	hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	/* Carry over previously requested receive modes. */
	if (hw->all_multi)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	if (hw->promiscuous)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
}
/**
 * hw_setup_intr - setup interrupt mask
 * @hw: The hardware instance.
 *
 * This routine setup the interrupt mask for proper operation.  The receive
 * overrun interrupt is enabled in addition to the normal mask.
 */
static void hw_setup_intr(struct ksz_hw *hw)
{
	hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
}
/*
 * ksz_check_desc_num - validate and fix the configured descriptor count
 * @info: Descriptor information structure.
 *
 * The hardware requires the descriptor count to be a power of two and at
 * least 1 << MIN_DESC_SHIFT.  If the configured count is invalid it is
 * rounded up to the next acceptable power of two.  The index mask is
 * derived from the final count.
 */
static void ksz_check_desc_num(struct ksz_desc_info *info)
{
#define MIN_DESC_SHIFT 2
	int alloc = info->alloc;
	int shift;

	/* Shift out trailing zero bits to find the lowest set bit. */
	shift = 0;
	while (!(alloc & 1)) {
		shift++;
		alloc >>= 1;
	}
	/* Valid only if exactly one bit was set and it is high enough. */
	if (alloc != 1 || shift < MIN_DESC_SHIFT) {
		pr_alert("Hardware descriptor numbers not right!\n");
		/* Count the remaining bits to round up past the top bit. */
		while (alloc) {
			shift++;
			alloc >>= 1;
		}
		if (shift < MIN_DESC_SHIFT)
			shift = MIN_DESC_SHIFT;
		alloc = 1 << shift;
		info->alloc = alloc;
	}
	/* Power-of-two count makes the ring index a simple mask. */
	info->mask = info->alloc - 1;
}
/*
 * hw_init_desc - initialize a descriptor ring
 * @desc_info: Descriptor information structure.
 * @transmit: Indication that descriptors are for transmit (currently unused;
 *	the same initialization is applied to both rings).
 *
 * Links each software descriptor to its hardware descriptor and chains the
 * hardware descriptors' next pointers into a circular ring.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		/* Each descriptor points at the physical address of the
		 * following one.
		 */
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}
	/* Close the ring: last descriptor points back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	/* NOTE(review): rx.end_of_ring is set even for the transmit ring —
	 * presumably harmless via the buf union; confirm against hardware
	 * descriptor layout.
	 */
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;
	desc_info->cur = desc_info->ring;
}
  3261. /**
  3262. * hw_set_desc_base - set descriptor base addresses
  3263. * @hw: The hardware instance.
  3264. * @tx_addr: The transmit descriptor base.
  3265. * @rx_addr: The receive descriptor base.
  3266. *
  3267. * This routine programs the descriptor base addresses after reset.
  3268. */
  3269. static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
  3270. {
  3271. /* Set base address of Tx/Rx descriptors. */
  3272. writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
  3273. writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
  3274. }
  3275. static void hw_reset_pkts(struct ksz_desc_info *info)
  3276. {
  3277. info->cur = info->ring;
  3278. info->avail = info->alloc;
  3279. info->last = info->next = 0;
  3280. }
/* Kick the receive DMA engine to resume fetching descriptors. */
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}
  3285. /**
  3286. * hw_start_rx - start receiving
  3287. * @hw: The hardware instance.
  3288. *
  3289. * This routine starts the receive function of the hardware.
  3290. */
  3291. static void hw_start_rx(struct ksz_hw *hw)
  3292. {
  3293. writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
  3294. /* Notify when the receive stops. */
  3295. hw->intr_mask |= KS884X_INT_RX_STOPPED;
  3296. writel(DMA_START, hw->io + KS_DMA_RX_START);
  3297. hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
  3298. hw->rx_stop++;
  3299. /* Variable overflows. */
  3300. if (0 == hw->rx_stop)
  3301. hw->rx_stop = 2;
  3302. }
  3303. /**
  3304. * hw_stop_rx - stop receiving
  3305. * @hw: The hardware instance.
  3306. *
  3307. * This routine stops the receive function of the hardware.
  3308. */
  3309. static void hw_stop_rx(struct ksz_hw *hw)
  3310. {
  3311. hw->rx_stop = 0;
  3312. hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
  3313. writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
  3314. }
  3315. /**
  3316. * hw_start_tx - start transmitting
  3317. * @hw: The hardware instance.
  3318. *
  3319. * This routine starts the transmit function of the hardware.
  3320. */
  3321. static void hw_start_tx(struct ksz_hw *hw)
  3322. {
  3323. writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
  3324. }
  3325. /**
  3326. * hw_stop_tx - stop transmitting
  3327. * @hw: The hardware instance.
  3328. *
  3329. * This routine stops the transmit function of the hardware.
  3330. */
  3331. static void hw_stop_tx(struct ksz_hw *hw)
  3332. {
  3333. writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
  3334. }
  3335. /**
  3336. * hw_disable - disable hardware
  3337. * @hw: The hardware instance.
  3338. *
  3339. * This routine disables the hardware.
  3340. */
  3341. static void hw_disable(struct ksz_hw *hw)
  3342. {
  3343. hw_stop_rx(hw);
  3344. hw_stop_tx(hw);
  3345. hw->enabled = 0;
  3346. }
  3347. /**
  3348. * hw_enable - enable hardware
  3349. * @hw: The hardware instance.
  3350. *
  3351. * This routine enables the hardware.
  3352. */
  3353. static void hw_enable(struct ksz_hw *hw)
  3354. {
  3355. hw_start_tx(hw);
  3356. hw_start_rx(hw);
  3357. hw->enabled = 1;
  3358. }
  3359. /**
  3360. * hw_alloc_pkt - allocate enough descriptors for transmission
  3361. * @hw: The hardware instance.
  3362. * @length: The length of the packet.
  3363. * @physical: Number of descriptors required.
  3364. *
  3365. * This function allocates descriptors for transmission.
  3366. *
  3367. * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
  3368. */
  3369. static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
  3370. {
  3371. /* Always leave one descriptor free. */
  3372. if (hw->tx_desc_info.avail <= 1)
  3373. return 0;
  3374. /* Allocate a descriptor for transmission and mark it current. */
  3375. get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
  3376. hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
  3377. /* Keep track of number of transmit descriptors used so far. */
  3378. ++hw->tx_int_cnt;
  3379. hw->tx_size += length;
  3380. /* Cannot hold on too much data. */
  3381. if (hw->tx_size >= MAX_TX_HELD_SIZE)
  3382. hw->tx_int_cnt = hw->tx_int_mask + 1;
  3383. if (physical > hw->tx_desc_info.avail)
  3384. return 1;
  3385. return hw->tx_desc_info.avail;
  3386. }
  3387. /**
  3388. * hw_send_pkt - mark packet for transmission
  3389. * @hw: The hardware instance.
  3390. *
  3391. * This routine marks the packet for transmission in PCI version.
  3392. */
  3393. static void hw_send_pkt(struct ksz_hw *hw)
  3394. {
  3395. struct ksz_desc *cur = hw->tx_desc_info.cur;
  3396. cur->sw.buf.tx.last_seg = 1;
  3397. /* Interrupt only after specified number of descriptors used. */
  3398. if (hw->tx_int_cnt > hw->tx_int_mask) {
  3399. cur->sw.buf.tx.intr = 1;
  3400. hw->tx_int_cnt = 0;
  3401. hw->tx_size = 0;
  3402. }
  3403. /* KSZ8842 supports port directed transmission. */
  3404. cur->sw.buf.tx.dest_port = hw->dst_ports;
  3405. release_desc(cur);
  3406. writel(0, hw->io + KS_DMA_TX_START);
  3407. }
  3408. static int empty_addr(u8 *addr)
  3409. {
  3410. u32 *addr1 = (u32 *) addr;
  3411. u16 *addr2 = (u16 *) &addr[4];
  3412. return 0 == *addr1 && 0 == *addr2;
  3413. }
  3414. /**
  3415. * hw_set_addr - set MAC address
  3416. * @hw: The hardware instance.
  3417. *
  3418. * This routine programs the MAC address of the hardware when the address is
  3419. * overridden.
  3420. */
  3421. static void hw_set_addr(struct ksz_hw *hw)
  3422. {
  3423. int i;
  3424. for (i = 0; i < ETH_ALEN; i++)
  3425. writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
  3426. hw->io + KS884X_ADDR_0_OFFSET + i);
  3427. sw_set_addr(hw, hw->override_addr);
  3428. }
/**
 * hw_read_addr - read MAC address
 * @hw: The hardware instance.
 *
 * This routine retrieves the MAC address of the hardware.  When no address
 * override is in effect, the permanent address becomes the active one; if
 * the hardware holds an all-zero address, a driver default (made unique per
 * device by adding the device id to the last byte) is used and programmed
 * back into the hardware.
 */
static void hw_read_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
			KS884X_ADDR_0_OFFSET + i);

	if (!hw->mac_override) {
		memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
		if (empty_addr(hw->override_addr)) {
			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
			memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
				ETH_ALEN);
			/* Make the default unique per device instance. */
			hw->override_addr[5] += hw->id;
			hw_set_addr(hw);
		}
	}
}
  3452. static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
  3453. {
  3454. int i;
  3455. u32 mac_addr_lo;
  3456. u32 mac_addr_hi;
  3457. mac_addr_hi = 0;
  3458. for (i = 0; i < 2; i++) {
  3459. mac_addr_hi <<= 8;
  3460. mac_addr_hi |= mac_addr[i];
  3461. }
  3462. mac_addr_hi |= ADD_ADDR_ENABLE;
  3463. mac_addr_lo = 0;
  3464. for (i = 2; i < 6; i++) {
  3465. mac_addr_lo <<= 8;
  3466. mac_addr_lo |= mac_addr[i];
  3467. }
  3468. index *= ADD_ADDR_INCR;
  3469. writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
  3470. writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
  3471. }
  3472. static void hw_set_add_addr(struct ksz_hw *hw)
  3473. {
  3474. int i;
  3475. for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
  3476. if (empty_addr(hw->address[i]))
  3477. writel(0, hw->io + ADD_ADDR_INCR * i +
  3478. KS_ADD_ADDR_0_HI);
  3479. else
  3480. hw_ena_add_addr(hw, i, hw->address[i]);
  3481. }
  3482. }
  3483. static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr)
  3484. {
  3485. int i;
  3486. int j = ADDITIONAL_ENTRIES;
  3487. if (ether_addr_equal(hw->override_addr, mac_addr))
  3488. return 0;
  3489. for (i = 0; i < hw->addr_list_size; i++) {
  3490. if (ether_addr_equal(hw->address[i], mac_addr))
  3491. return 0;
  3492. if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
  3493. j = i;
  3494. }
  3495. if (j < ADDITIONAL_ENTRIES) {
  3496. memcpy(hw->address[j], mac_addr, ETH_ALEN);
  3497. hw_ena_add_addr(hw, j, hw->address[j]);
  3498. return 0;
  3499. }
  3500. return -1;
  3501. }
  3502. static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr)
  3503. {
  3504. int i;
  3505. for (i = 0; i < hw->addr_list_size; i++) {
  3506. if (ether_addr_equal(hw->address[i], mac_addr)) {
  3507. eth_zero_addr(hw->address[i]);
  3508. writel(0, hw->io + ADD_ADDR_INCR * i +
  3509. KS_ADD_ADDR_0_HI);
  3510. return 0;
  3511. }
  3512. }
  3513. return -1;
  3514. }
  3515. /**
  3516. * hw_clr_multicast - clear multicast addresses
  3517. * @hw: The hardware instance.
  3518. *
  3519. * This routine removes all multicast addresses set in the hardware.
  3520. */
  3521. static void hw_clr_multicast(struct ksz_hw *hw)
  3522. {
  3523. int i;
  3524. for (i = 0; i < HW_MULTICAST_SIZE; i++) {
  3525. hw->multi_bits[i] = 0;
  3526. writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
  3527. }
  3528. }
  3529. /**
  3530. * hw_set_grp_addr - set multicast addresses
  3531. * @hw: The hardware instance.
  3532. *
  3533. * This routine programs multicast addresses for the hardware to accept those
  3534. * addresses.
  3535. */
  3536. static void hw_set_grp_addr(struct ksz_hw *hw)
  3537. {
  3538. int i;
  3539. int index;
  3540. int position;
  3541. int value;
  3542. memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
  3543. for (i = 0; i < hw->multi_list_size; i++) {
  3544. position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
  3545. index = position >> 3;
  3546. value = 1 << (position & 7);
  3547. hw->multi_bits[index] |= (u8) value;
  3548. }
  3549. for (i = 0; i < HW_MULTICAST_SIZE; i++)
  3550. writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
  3551. i);
  3552. }
  3553. /**
  3554. * hw_set_multicast - enable or disable all multicast receiving
  3555. * @hw: The hardware instance.
  3556. * @multicast: To turn on or off the all multicast feature.
  3557. *
  3558. * This routine enables/disables the hardware to accept all multicast packets.
  3559. */
  3560. static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
  3561. {
  3562. /* Stop receiving for reconfiguration. */
  3563. hw_stop_rx(hw);
  3564. if (multicast)
  3565. hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
  3566. else
  3567. hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
  3568. if (hw->enabled)
  3569. hw_start_rx(hw);
  3570. }
  3571. /**
  3572. * hw_set_promiscuous - enable or disable promiscuous receiving
  3573. * @hw: The hardware instance.
  3574. * @prom: To turn on or off the promiscuous feature.
  3575. *
  3576. * This routine enables/disables the hardware to accept all packets.
  3577. */
  3578. static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
  3579. {
  3580. /* Stop receiving for reconfiguration. */
  3581. hw_stop_rx(hw);
  3582. if (prom)
  3583. hw->rx_cfg |= DMA_RX_PROMISCUOUS;
  3584. else
  3585. hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
  3586. if (hw->enabled)
  3587. hw_start_rx(hw);
  3588. }
  3589. /**
  3590. * sw_enable - enable the switch
  3591. * @hw: The hardware instance.
  3592. * @enable: The flag to enable or disable the switch
  3593. *
  3594. * This routine is used to enable/disable the switch in KSZ8842.
  3595. */
  3596. static void sw_enable(struct ksz_hw *hw, int enable)
  3597. {
  3598. int port;
  3599. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  3600. if (hw->dev_count > 1) {
  3601. /* Set port-base vlan membership with host port. */
  3602. sw_cfg_port_base_vlan(hw, port,
  3603. HOST_MASK | (1 << port));
  3604. port_set_stp_state(hw, port, STP_STATE_DISABLED);
  3605. } else {
  3606. sw_cfg_port_base_vlan(hw, port, PORT_MASK);
  3607. port_set_stp_state(hw, port, STP_STATE_FORWARDING);
  3608. }
  3609. }
  3610. if (hw->dev_count > 1)
  3611. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  3612. else
  3613. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
  3614. if (enable)
  3615. enable = KS8842_START;
  3616. writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
  3617. }
  3618. /**
  3619. * sw_setup - setup the switch
  3620. * @hw: The hardware instance.
  3621. *
  3622. * This routine setup the hardware switch engine for default operation.
  3623. */
  3624. static void sw_setup(struct ksz_hw *hw)
  3625. {
  3626. int port;
  3627. sw_set_global_ctrl(hw);
  3628. /* Enable switch broadcast storm protection at 10% percent rate. */
  3629. sw_init_broad_storm(hw);
  3630. hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
  3631. for (port = 0; port < SWITCH_PORT_NUM; port++)
  3632. sw_ena_broad_storm(hw, port);
  3633. sw_init_prio(hw);
  3634. sw_init_mirror(hw);
  3635. sw_init_prio_rate(hw);
  3636. sw_init_vlan(hw);
  3637. if (hw->features & STP_SUPPORT)
  3638. sw_init_stp(hw);
  3639. if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  3640. SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
  3641. hw->overrides |= PAUSE_FLOW_CTRL;
  3642. sw_enable(hw, 1);
  3643. }
  3644. /**
  3645. * ksz_start_timer - start kernel timer
  3646. * @info: Kernel timer information.
  3647. * @time: The time tick.
  3648. *
  3649. * This routine starts the kernel timer after the specified time tick.
  3650. */
  3651. static void ksz_start_timer(struct ksz_timer_info *info, int time)
  3652. {
  3653. info->cnt = 0;
  3654. info->timer.expires = jiffies + time;
  3655. add_timer(&info->timer);
  3656. /* infinity */
  3657. info->max = -1;
  3658. }
  3659. /**
  3660. * ksz_stop_timer - stop kernel timer
  3661. * @info: Kernel timer information.
  3662. *
  3663. * This routine stops the kernel timer.
  3664. */
  3665. static void ksz_stop_timer(struct ksz_timer_info *info)
  3666. {
  3667. if (info->max) {
  3668. info->max = 0;
  3669. del_timer_sync(&info->timer);
  3670. }
  3671. }
/*
 * ksz_init_timer - initialize a driver timer
 * @info: Kernel timer information.
 * @period: Re-arm interval in jiffies used by ksz_update_timer.
 * @function: Timer callback.
 *
 * The timer starts out stopped (max = 0); use ksz_start_timer to run it.
 */
static void ksz_init_timer(struct ksz_timer_info *info, int period,
	void (*function)(struct timer_list *))
{
	info->max = 0;
	info->period = period;
	timer_setup(&info->timer, function, 0);
}
  3679. static void ksz_update_timer(struct ksz_timer_info *info)
  3680. {
  3681. ++info->cnt;
  3682. if (info->max > 0) {
  3683. if (info->cnt < info->max) {
  3684. info->timer.expires = jiffies + info->period;
  3685. add_timer(&info->timer);
  3686. } else
  3687. info->max = 0;
  3688. } else if (info->max < 0) {
  3689. info->timer.expires = jiffies + info->period;
  3690. add_timer(&info->timer);
  3691. }
  3692. }
  3693. /**
  3694. * ksz_alloc_soft_desc - allocate software descriptors
  3695. * @desc_info: Descriptor information structure.
  3696. * @transmit: Indication that descriptors are for transmit.
  3697. *
  3698. * This local function allocates software descriptors for manipulation in
  3699. * memory.
  3700. *
  3701. * Return 0 if successful.
  3702. */
  3703. static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
  3704. {
  3705. desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
  3706. GFP_KERNEL);
  3707. if (!desc_info->ring)
  3708. return 1;
  3709. hw_init_desc(desc_info, transmit);
  3710. return 0;
  3711. }
/**
 * ksz_alloc_desc - allocate hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local function allocates hardware descriptors for receiving and
 * transmitting.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;
	int offset;

	/*
	 * One coherent DMA pool holds both rings, plus slack so the first
	 * descriptor can be placed on a DESC_ALIGNMENT boundary.
	 */
	adapter->desc_pool.alloc_size =
		hw->rx_desc_info.size * hw->rx_desc_info.alloc +
		hw->tx_desc_info.size * hw->tx_desc_info.alloc +
		DESC_ALIGNMENT;

	adapter->desc_pool.alloc_virt =
		dma_alloc_coherent(&adapter->pdev->dev,
				   adapter->desc_pool.alloc_size,
				   &adapter->desc_pool.dma_addr, GFP_KERNEL);
	if (adapter->desc_pool.alloc_virt == NULL) {
		adapter->desc_pool.alloc_size = 0;
		return 1;
	}

	/* Align to the next cache line boundary. */
	offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
		(DESC_ALIGNMENT -
		((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
	adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
	adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;

	/*
	 * Carve the pool up: receive ring first, transmit ring immediately
	 * after it.  virt and phys advance by the same offset so the CPU
	 * and hardware views stay in sync.
	 */
	hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
		adapter->desc_pool.virt;
	hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
	offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
	hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
		(adapter->desc_pool.virt + offset);
	hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;

	/*
	 * NOTE(review): on soft-descriptor failure the DMA pool is not
	 * freed here -- presumably the caller unwinds via ksz_free_mem();
	 * confirm against the probe error path.
	 */
	if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
		return 1;
	if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
		return 1;

	return 0;
}
  3758. /**
  3759. * free_dma_buf - release DMA buffer resources
  3760. * @adapter: Adapter information structure.
  3761. * @dma_buf: pointer to buf
  3762. * @direction: to or from device
  3763. *
  3764. * This routine is just a helper function to release the DMA buffer resources.
  3765. */
  3766. static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
  3767. int direction)
  3768. {
  3769. dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len,
  3770. direction);
  3771. dev_kfree_skb(dma_buf->skb);
  3772. dma_buf->skb = NULL;
  3773. dma_buf->dma = 0;
  3774. }
  3775. /**
  3776. * ksz_init_rx_buffers - initialize receive descriptors
  3777. * @adapter: Adapter information structure.
  3778. *
  3779. * This routine initializes DMA buffers for receiving.
  3780. */
  3781. static void ksz_init_rx_buffers(struct dev_info *adapter)
  3782. {
  3783. int i;
  3784. struct ksz_desc *desc;
  3785. struct ksz_dma_buf *dma_buf;
  3786. struct ksz_hw *hw = &adapter->hw;
  3787. struct ksz_desc_info *info = &hw->rx_desc_info;
  3788. for (i = 0; i < hw->rx_desc_info.alloc; i++) {
  3789. get_rx_pkt(info, &desc);
  3790. dma_buf = DMA_BUFFER(desc);
  3791. if (dma_buf->skb && dma_buf->len != adapter->mtu)
  3792. free_dma_buf(adapter, dma_buf, DMA_FROM_DEVICE);
  3793. dma_buf->len = adapter->mtu;
  3794. if (!dma_buf->skb)
  3795. dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
  3796. if (dma_buf->skb && !dma_buf->dma)
  3797. dma_buf->dma = dma_map_single(&adapter->pdev->dev,
  3798. skb_tail_pointer(dma_buf->skb),
  3799. dma_buf->len,
  3800. DMA_FROM_DEVICE);
  3801. /* Set descriptor. */
  3802. set_rx_buf(desc, dma_buf->dma);
  3803. set_rx_len(desc, dma_buf->len);
  3804. release_desc(desc);
  3805. }
  3806. }
/**
 * ksz_alloc_mem - allocate memory for hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This function allocates memory for use by hardware descriptors for receiving
 * and transmitting.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_mem(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Determine the number of receive and transmit descriptors. */
	hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
	hw->tx_desc_info.alloc = NUM_OF_TX_DESC;

	/*
	 * Determine how many descriptors to skip transmit interrupt.
	 * Start from a quarter of the ring, cap it at 8, then reduce the
	 * value to a power-of-two mask: the loop counts the bits of
	 * tx_int_mask into tx_int_cnt, and the final step turns that count
	 * into (2^(bits-1) - 1) with the running counter reset to 0.
	 */
	hw->tx_int_cnt = 0;
	hw->tx_int_mask = NUM_OF_TX_DESC / 4;
	if (hw->tx_int_mask > 8)
		hw->tx_int_mask = 8;
	while (hw->tx_int_mask) {
		hw->tx_int_cnt++;
		hw->tx_int_mask >>= 1;
	}
	if (hw->tx_int_cnt) {
		hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
		hw->tx_int_cnt = 0;
	}

	/* Determine the descriptor size, rounded up to DESC_ALIGNMENT. */
	hw->rx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	hw->tx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
		pr_alert("Hardware descriptor size not right!\n");
	ksz_check_desc_num(&hw->rx_desc_info);
	ksz_check_desc_num(&hw->tx_desc_info);

	/* Allocate descriptors. */
	if (ksz_alloc_desc(adapter))
		return 1;

	return 0;
}
  3851. /**
  3852. * ksz_free_desc - free software and hardware descriptors
  3853. * @adapter: Adapter information structure.
  3854. *
  3855. * This local routine frees the software and hardware descriptors allocated by
  3856. * ksz_alloc_desc().
  3857. */
  3858. static void ksz_free_desc(struct dev_info *adapter)
  3859. {
  3860. struct ksz_hw *hw = &adapter->hw;
  3861. /* Reset descriptor. */
  3862. hw->rx_desc_info.ring_virt = NULL;
  3863. hw->tx_desc_info.ring_virt = NULL;
  3864. hw->rx_desc_info.ring_phys = 0;
  3865. hw->tx_desc_info.ring_phys = 0;
  3866. /* Free memory. */
  3867. if (adapter->desc_pool.alloc_virt)
  3868. dma_free_coherent(&adapter->pdev->dev,
  3869. adapter->desc_pool.alloc_size,
  3870. adapter->desc_pool.alloc_virt,
  3871. adapter->desc_pool.dma_addr);
  3872. /* Reset resource pool. */
  3873. adapter->desc_pool.alloc_size = 0;
  3874. adapter->desc_pool.alloc_virt = NULL;
  3875. kfree(hw->rx_desc_info.ring);
  3876. hw->rx_desc_info.ring = NULL;
  3877. kfree(hw->tx_desc_info.ring);
  3878. hw->tx_desc_info.ring = NULL;
  3879. }
  3880. /**
  3881. * ksz_free_buffers - free buffers used in the descriptors
  3882. * @adapter: Adapter information structure.
  3883. * @desc_info: Descriptor information structure.
  3884. * @direction: to or from device
  3885. *
  3886. * This local routine frees buffers used in the DMA buffers.
  3887. */
  3888. static void ksz_free_buffers(struct dev_info *adapter,
  3889. struct ksz_desc_info *desc_info, int direction)
  3890. {
  3891. int i;
  3892. struct ksz_dma_buf *dma_buf;
  3893. struct ksz_desc *desc = desc_info->ring;
  3894. for (i = 0; i < desc_info->alloc; i++) {
  3895. dma_buf = DMA_BUFFER(desc);
  3896. if (dma_buf->skb)
  3897. free_dma_buf(adapter, dma_buf, direction);
  3898. desc++;
  3899. }
  3900. }
  3901. /**
  3902. * ksz_free_mem - free all resources used by descriptors
  3903. * @adapter: Adapter information structure.
  3904. *
  3905. * This local routine frees all the resources allocated by ksz_alloc_mem().
  3906. */
  3907. static void ksz_free_mem(struct dev_info *adapter)
  3908. {
  3909. /* Free transmit buffers. */
  3910. ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE);
  3911. /* Free receive buffers. */
  3912. ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE);
  3913. /* Free descriptors. */
  3914. ksz_free_desc(adapter);
  3915. }
  3916. static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
  3917. u64 *counter)
  3918. {
  3919. int i;
  3920. int mib;
  3921. int port;
  3922. struct ksz_port_mib *port_mib;
  3923. memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
  3924. for (i = 0, port = first; i < cnt; i++, port++) {
  3925. port_mib = &hw->port_mib[port];
  3926. for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
  3927. counter[mib] += port_mib->counter[mib];
  3928. }
  3929. }
/**
 * send_packet - send packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This routine maps the socket buffer (head and all fragments) for DMA,
 * fills in one transmit descriptor per buffer and kicks the hardware.
 * The caller reserves enough descriptors beforehand via hw_alloc_pkt().
 */
static void send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct ksz_desc *desc;
	struct ksz_desc *first;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_dma_buf *dma_buf;
	int len;
	int last_frag = skb_shinfo(skb)->nr_frags;

	/*
	 * KSZ8842 with multiple device interfaces needs to be told which port
	 * to send.
	 */
	if (hw->dev_count > 1)
		hw->dst_ports = 1 << priv->port.first_port;

	/* Hardware will pad the length to 60. */
	len = skb->len;

	/* Remember the very first descriptor. */
	first = info->cur;
	desc = first;

	dma_buf = DMA_BUFFER(desc);
	if (last_frag) {
		int frag;
		skb_frag_t *this_frag;

		/* Linear part of the skb goes into the first descriptor. */
		dma_buf->len = skb_headlen(skb);

		dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
			dma_buf->len, DMA_TO_DEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);

		frag = 0;
		do {
			this_frag = &skb_shinfo(skb)->frags[frag];

			/* Get a new descriptor. */
			get_tx_pkt(info, &desc);

			/* Keep track of descriptors used so far. */
			++hw->tx_int_cnt;

			dma_buf = DMA_BUFFER(desc);
			dma_buf->len = skb_frag_size(this_frag);

			dma_buf->dma = dma_map_single(&hw_priv->pdev->dev,
				skb_frag_address(this_frag),
				dma_buf->len,
				DMA_TO_DEVICE);
			set_tx_buf(desc, dma_buf->dma);
			set_tx_len(desc, dma_buf->len);

			frag++;
			if (frag == last_frag)
				break;

			/* Do not release the last descriptor here. */
			release_desc(desc);
		} while (1);

		/* current points to the last descriptor. */
		info->cur = desc;

		/*
		 * Release the first descriptor last, so the hardware only
		 * sees a fully built chain.
		 */
		release_desc(first);
	} else {
		/* Unfragmented packet: a single descriptor is enough. */
		dma_buf->len = len;

		dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
			dma_buf->len, DMA_TO_DEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Ask the hardware to generate the TCP/UDP checksum. */
		(desc)->sw.buf.tx.csum_gen_tcp = 1;
		(desc)->sw.buf.tx.csum_gen_udp = 1;
	}

	/*
	 * The last descriptor holds the packet so that it can be returned to
	 * network subsystem after all descriptors are transmitted.
	 */
	dma_buf->skb = skb;

	hw_send_pkt(hw);

	/* Update transmit statistics. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
}
/**
 * transmit_cleanup - clean up transmit descriptors
 * @hw_priv: Network device.
 * @normal: break if owned
 *
 * Walk the transmit ring from the oldest in-flight descriptor, unmapping
 * each buffer and freeing the socket buffer of every completed packet.
 * When @normal is zero (reset path), descriptors still owned by hardware
 * are forcibly reclaimed; otherwise the walk stops at the first one.
 */
static void transmit_cleanup(struct dev_info *hw_priv, int normal)
{
	int last;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct net_device *dev = NULL;

	spin_lock_irq(&hw_priv->hwlock);
	last = info->last;

	while (info->avail < info->alloc) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[last];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.tx.hw_owned) {
			if (normal)
				break;
			else
				/* Reset path: take it back from hardware. */
				reset_desc(desc, status);
		}

		dma_buf = DMA_BUFFER(desc);
		dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma,
			dma_buf->len, DMA_TO_DEVICE);

		/* This descriptor contains the last buffer in the packet. */
		if (dma_buf->skb) {
			dev = dma_buf->skb->dev;

			/* Release the packet back to network subsystem. */
			dev_kfree_skb_irq(dma_buf->skb);
			dma_buf->skb = NULL;
		}

		/* Free the transmitted descriptor. */
		last++;
		last &= info->mask;
		info->avail++;
	}
	info->last = last;
	spin_unlock_irq(&hw_priv->hwlock);

	/* Notify the network subsystem that the packet has been sent. */
	if (dev)
		netif_trans_update(dev);
}
  4063. /**
  4064. * tx_done - transmit done processing
  4065. * @hw_priv: Network device.
  4066. *
  4067. * This routine is called when the transmit interrupt is triggered, indicating
  4068. * either a packet is sent successfully or there are transmit errors.
  4069. */
  4070. static void tx_done(struct dev_info *hw_priv)
  4071. {
  4072. struct ksz_hw *hw = &hw_priv->hw;
  4073. int port;
  4074. transmit_cleanup(hw_priv, 1);
  4075. for (port = 0; port < hw->dev_count; port++) {
  4076. struct net_device *dev = hw->port_info[port].pdev;
  4077. if (netif_running(dev) && netif_queue_stopped(dev))
  4078. netif_wake_queue(dev);
  4079. }
  4080. }
  4081. static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
  4082. {
  4083. skb->dev = old->dev;
  4084. skb->protocol = old->protocol;
  4085. skb->ip_summed = old->ip_summed;
  4086. skb->csum = old->csum;
  4087. skb_set_network_header(skb, ETH_HLEN);
  4088. dev_consume_skb_any(old);
  4089. }
/**
 * netdev_tx - send out packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This function is used by the upper network layer to send out a packet.
 *
 * Return NETDEV_TX_OK (0) when the packet was queued to hardware;
 * NETDEV_TX_BUSY when no descriptors (or no memory) are available, in
 * which case the queue is stopped and the stack retries the skb.
 */
static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int left;
	int num = 1;
	int rc = 0;

	if (hw->features & SMALL_PACKET_TX_BUG) {
		struct sk_buff *org_skb = skb;

		/*
		 * Hardware bug workaround: pad packets of 48 bytes or less
		 * out to 50 bytes, copying into a fresh skb when the
		 * original has insufficient tailroom.
		 */
		if (skb->len <= 48) {
			if (skb_end_pointer(skb) - skb->data >= 50) {
				memset(&skb->data[skb->len], 0, 50 - skb->len);
				skb->len = 50;
			} else {
				skb = netdev_alloc_skb(dev, 50);
				if (!skb)
					return NETDEV_TX_BUSY;
				memcpy(skb->data, org_skb->data, org_skb->len);
				memset(&skb->data[org_skb->len], 0,
					50 - org_skb->len);
				skb->len = 50;
				copy_old_skb(org_skb, skb);
			}
		}
	}

	spin_lock_irq(&hw_priv->hwlock);

	num = skb_shinfo(skb)->nr_frags + 1;
	left = hw_alloc_pkt(hw, skb->len, num);
	if (left) {
		/*
		 * Linearize the packet when there are not enough descriptors
		 * for all fragments, or when hardware checksum generation
		 * cannot be used (CHECKSUM_PARTIAL on IPv6) and the checksum
		 * must be computed in software instead.
		 */
		if (left < num ||
		    (CHECKSUM_PARTIAL == skb->ip_summed &&
		     skb->protocol == htons(ETH_P_IPV6))) {
			struct sk_buff *org_skb = skb;

			skb = netdev_alloc_skb(dev, org_skb->len);
			if (!skb) {
				rc = NETDEV_TX_BUSY;
				goto unlock;
			}
			skb_copy_and_csum_dev(org_skb, skb->data);
			org_skb->ip_summed = CHECKSUM_NONE;
			skb->len = org_skb->len;
			copy_old_skb(org_skb, skb);
		}
		send_packet(skb, dev);
		if (left <= num)
			netif_stop_queue(dev);
	} else {
		/* Stop the transmit queue until packet is allocated. */
		netif_stop_queue(dev);
		rc = NETDEV_TX_BUSY;
	}
unlock:
	spin_unlock_irq(&hw_priv->hwlock);

	return rc;
}
/**
 * netdev_tx_timeout - transmit timeout processing
 * @dev: Network device.
 * @txqueue: index of hanging queue
 *
 * This routine is called when the transmit timer expires. That indicates the
 * hardware is not running correctly because transmit interrupts are not
 * triggered to free up resources so that the transmit routine can continue
 * sending out packets. The hardware is reset to correct the problem.
 */
static void netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	static unsigned long last_reset;

	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int port;

	if (hw->dev_count > 1) {
		/*
		 * Only reset the hardware if time between calls is long
		 * enough.
		 */
		if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
			/* NULLing hw_priv skips the reset below. */
			hw_priv = NULL;
	}

	last_reset = jiffies;
	if (hw_priv) {
		/* Quiesce the hardware before touching the rings. */
		hw_dis_intr(hw);
		hw_disable(hw);

		/* Drop all in-flight packets and rebuild the rx ring. */
		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		ksz_init_rx_buffers(hw_priv);

		hw_reset(hw);

		/* Re-program state lost by the reset. */
		hw_set_desc_base(hw,
			hw->tx_desc_info.ring_phys,
			hw->rx_desc_info.ring_phys);
		hw_set_addr(hw);
		if (hw->all_multi)
			hw_set_multicast(hw, hw->all_multi);
		else if (hw->multi_list_size)
			hw_set_grp_addr(hw);

		if (hw->dev_count > 1) {
			/* Restore per-port STP state for running ports. */
			hw_set_add_addr(hw);
			for (port = 0; port < SWITCH_PORT_NUM; port++) {
				struct net_device *port_dev;

				port_set_stp_state(hw, port,
					STP_STATE_DISABLED);

				port_dev = hw->port_info[port].pdev;
				if (netif_running(port_dev))
					port_set_stp_state(hw, port,
						STP_STATE_SIMPLE);
			}
		}

		hw_enable(hw);
		hw_ena_intr(hw);
	}

	netif_trans_update(dev);
	netif_wake_queue(dev);
}
/*
 * Mark TCP-over-IPv4 frames whose checksum the hardware already checked as
 * CHECKSUM_UNNECESSARY (only called when rx checksum offload is enabled).
 *
 * NOTE(review): for 802.1Q frames the encapsulated protocol is re-read
 * through iph->tot_len -- with the "IP header" overlaid on the VLAN header,
 * tot_len lines up with the encapsulated-protocol field.  This aliasing
 * trick depends on struct layout; confirm before modifying.
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;

	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Read the encapsulated protocol (see note above). */
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		/* Only TCP is marked as verified here. */
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
  4232. static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
  4233. struct ksz_desc *desc, union desc_stat status)
  4234. {
  4235. int packet_len;
  4236. struct dev_priv *priv = netdev_priv(dev);
  4237. struct dev_info *hw_priv = priv->adapter;
  4238. struct ksz_dma_buf *dma_buf;
  4239. struct sk_buff *skb;
  4240. /* Received length includes 4-byte CRC. */
  4241. packet_len = status.rx.frame_len - 4;
  4242. dma_buf = DMA_BUFFER(desc);
  4243. dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma,
  4244. packet_len + 4, DMA_FROM_DEVICE);
  4245. do {
  4246. /* skb->data != skb->head */
  4247. skb = netdev_alloc_skb(dev, packet_len + 2);
  4248. if (!skb) {
  4249. dev->stats.rx_dropped++;
  4250. return -ENOMEM;
  4251. }
  4252. /*
  4253. * Align socket buffer in 4-byte boundary for better
  4254. * performance.
  4255. */
  4256. skb_reserve(skb, 2);
  4257. skb_put_data(skb, dma_buf->skb->data, packet_len);
  4258. } while (0);
  4259. skb->protocol = eth_type_trans(skb, dev);
  4260. if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
  4261. csum_verified(skb);
  4262. /* Update receive statistics. */
  4263. dev->stats.rx_packets++;
  4264. dev->stats.rx_bytes += packet_len;
  4265. /* Notify upper layer for received packet. */
  4266. netif_rx(skb);
  4267. return 0;
  4268. }
  4269. static int dev_rcv_packets(struct dev_info *hw_priv)
  4270. {
  4271. int next;
  4272. union desc_stat status;
  4273. struct ksz_hw *hw = &hw_priv->hw;
  4274. struct net_device *dev = hw->port_info[0].pdev;
  4275. struct ksz_desc_info *info = &hw->rx_desc_info;
  4276. int left = info->alloc;
  4277. struct ksz_desc *desc;
  4278. int received = 0;
  4279. next = info->next;
  4280. while (left--) {
  4281. /* Get next descriptor which is not hardware owned. */
  4282. desc = &info->ring[next];
  4283. status.data = le32_to_cpu(desc->phw->ctrl.data);
  4284. if (status.rx.hw_owned)
  4285. break;
  4286. /* Status valid only when last descriptor bit is set. */
  4287. if (status.rx.last_desc && status.rx.first_desc) {
  4288. if (rx_proc(dev, hw, desc, status))
  4289. goto release_packet;
  4290. received++;
  4291. }
  4292. release_packet:
  4293. release_desc(desc);
  4294. next++;
  4295. next &= info->mask;
  4296. }
  4297. info->next = next;
  4298. return received;
  4299. }
  4300. static int port_rcv_packets(struct dev_info *hw_priv)
  4301. {
  4302. int next;
  4303. union desc_stat status;
  4304. struct ksz_hw *hw = &hw_priv->hw;
  4305. struct net_device *dev = hw->port_info[0].pdev;
  4306. struct ksz_desc_info *info = &hw->rx_desc_info;
  4307. int left = info->alloc;
  4308. struct ksz_desc *desc;
  4309. int received = 0;
  4310. next = info->next;
  4311. while (left--) {
  4312. /* Get next descriptor which is not hardware owned. */
  4313. desc = &info->ring[next];
  4314. status.data = le32_to_cpu(desc->phw->ctrl.data);
  4315. if (status.rx.hw_owned)
  4316. break;
  4317. if (hw->dev_count > 1) {
  4318. /* Get received port number. */
  4319. int p = HW_TO_DEV_PORT(status.rx.src_port);
  4320. dev = hw->port_info[p].pdev;
  4321. if (!netif_running(dev))
  4322. goto release_packet;
  4323. }
  4324. /* Status valid only when last descriptor bit is set. */
  4325. if (status.rx.last_desc && status.rx.first_desc) {
  4326. if (rx_proc(dev, hw, desc, status))
  4327. goto release_packet;
  4328. received++;
  4329. }
  4330. release_packet:
  4331. release_desc(desc);
  4332. next++;
  4333. next &= info->mask;
  4334. }
  4335. info->next = next;
  4336. return received;
  4337. }
/*
 * Receive-ring scan used when huge-frame support is on: hardware reports
 * receive errors in this mode, so a frame is accepted unless it carries an
 * error other than "too long" (oversized frames are exactly what this mode
 * exists for).  Other errored frames only bump the per-port error counter.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;
	struct ksz_desc *desc;
	int received = 0;

	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;

		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);

			/* Deliver to the net_device of the ingress port. */
			dev = hw->port_info[p].pdev;
			if (!netif_running(dev))
				goto release_packet;
		}

		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error. With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);

				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}

release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;

	return received;
}
  4389. static void rx_proc_task(struct tasklet_struct *t)
  4390. {
  4391. struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
  4392. struct ksz_hw *hw = &hw_priv->hw;
  4393. if (!hw->enabled)
  4394. return;
  4395. if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
  4396. /* In case receive process is suspended because of overrun. */
  4397. hw_resume_rx(hw);
  4398. /* tasklets are interruptible. */
  4399. spin_lock_irq(&hw_priv->hwlock);
  4400. hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
  4401. spin_unlock_irq(&hw_priv->hwlock);
  4402. } else {
  4403. hw_ack_intr(hw, KS884X_INT_RX);
  4404. tasklet_schedule(&hw_priv->rx_tasklet);
  4405. }
  4406. }
/*
 * Tx tasklet: acknowledge the transmit interrupts, reclaim completed
 * transmit descriptors, then re-enable the transmit interrupt under the
 * hardware lock.
 */
static void tx_proc_task(struct tasklet_struct *t)
{
	struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
	struct ksz_hw *hw = &hw_priv->hw;

	hw_ack_intr(hw, KS884X_INT_TX_MASK);

	tx_done(hw_priv);

	/* tasklets are interruptible. */
	spin_lock_irq(&hw_priv->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&hw_priv->hwlock);
}
/*
 * Handle the "receive stopped" interrupt.  hw->rx_stop appears to act as a
 * small state counter: 0 = stop was deliberate (mask the interrupt),
 * 1 = receive just started (count the event), >1 = unexpected stop, so
 * restart rx DMA if receive should still be running -- confirm against the
 * sites that set rx_stop before changing this logic.
 */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	/* Receive just has been stopped. */
	if (0 == hw->rx_stop)
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	else if (hw->rx_stop > 1) {
		if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
			/* Unexpected stop while enabled: restart rx DMA. */
			hw_start_rx(hw);
		} else {
			/* Receive is not supposed to run; mask and reset. */
			hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
			hw->rx_stop = 0;
		}
	} else
		/* Receive just has been started. */
		hw->rx_stop++;
}
/**
 * netdev_intr - interrupt handling
 * @irq: Interrupt number.
 * @dev_id: Network device.
 *
 * Shared interrupt handler: acknowledges the pending interrupt causes and
 * defers the actual receive/transmit work to tasklets, keeping the time
 * spent with the hardware lock held to a minimum.
 *
 * Return IRQ_HANDLED if the interrupt is handled, IRQ_NONE when it did not
 * originate from this device.
 */
static irqreturn_t netdev_intr(int irq, void *dev_id)
{
	uint int_enable = 0;
	struct net_device *dev = (struct net_device *) dev_id;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	spin_lock(&hw_priv->hwlock);

	hw_read_intr(hw, &int_enable);

	/* Not our interrupt! */
	if (!int_enable) {
		spin_unlock(&hw_priv->hwlock);
		return IRQ_NONE;
	}

	do {
		hw_ack_intr(hw, int_enable);
		int_enable &= hw->intr_mask;

		if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
			/* Mask tx interrupts and defer work to the tasklet. */
			hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
			tasklet_schedule(&hw_priv->tx_tasklet);
		}

		if (likely(int_enable & KS884X_INT_RX)) {
			/* Mask rx interrupts and defer work to the tasklet. */
			hw_dis_intr_bit(hw, KS884X_INT_RX);
			tasklet_schedule(&hw_priv->rx_tasklet);
		}

		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
			dev->stats.rx_fifo_errors++;
			hw_resume_rx(hw);
		}

		if (unlikely(int_enable & KS884X_INT_PHY)) {
			struct ksz_port *port = &priv->port;

			hw->features |= LINK_INT_WORKING;
			port_get_link_speed(port);
		}

		if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
			handle_rx_stop(hw);
			break;
		}

		if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
			u32 data;

			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
			pr_info("Tx stopped\n");
			data = readl(hw->io + KS_DMA_TX_CTRL);
			if (!(data & DMA_TX_ENABLE))
				pr_info("Tx disabled\n");
			break;
		}
	} while (0);

	hw_ena_intr(hw);

	spin_unlock(&hw_priv->hwlock);

	return IRQ_HANDLED;
}
  4495. /*
  4496. * Linux network device functions
  4497. */
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: run the interrupt handler with interrupts masked. */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;

	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif
  4507. static void bridge_change(struct ksz_hw *hw)
  4508. {
  4509. int port;
  4510. u8 member;
  4511. struct ksz_switch *sw = hw->ksz_switch;
  4512. /* No ports in forwarding state. */
  4513. if (!sw->member) {
  4514. port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
  4515. sw_block_addr(hw);
  4516. }
  4517. for (port = 0; port < SWITCH_PORT_NUM; port++) {
  4518. if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
  4519. member = HOST_MASK | sw->member;
  4520. else
  4521. member = HOST_MASK | (1 << port);
  4522. if (member != sw->port_cfg[port].member)
  4523. sw_cfg_port_base_vlan(hw, port, member);
  4524. }
  4525. }
/**
 * netdev_close - close network device
 * @dev: Network device.
 *
 * This function process the close operation of network device. This is caused
 * by the user command "ifconfig ethX down."
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int netdev_close(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &hw_priv->hw;
	int pi;

	netif_stop_queue(dev);

	ksz_stop_timer(&priv->monitor_timer_info);

	/* Need to shut the port manually in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);

		/* Port is closed. Need to change bridge setting. */
		if (hw->features & STP_SUPPORT) {
			pi = 1 << port->first_port;
			if (hw->ksz_switch->member & pi) {
				hw->ksz_switch->member &= ~pi;
				bridge_change(hw);
			}
		}
	}
	if (port->first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	if (!hw_priv->wol_enable)
		port_set_power_saving(port, true);

	/* Drop the filter references this interface held. */
	if (priv->multicast)
		--hw->all_multi;
	if (priv->promiscuous)
		--hw->promiscuous;

	/* Tear the adapter down only when the last interface closes. */
	hw_priv->opened--;
	if (!(hw_priv->opened)) {
		ksz_stop_timer(&hw_priv->mib_timer_info);
		flush_work(&hw_priv->mib_read);

		hw_dis_intr(hw);
		hw_disable(hw);
		hw_clr_multicast(hw);

		/* Delay for receive task to stop scheduling itself. */
		msleep(2000 / HZ);

		tasklet_kill(&hw_priv->rx_tasklet);
		tasklet_kill(&hw_priv->tx_tasklet);
		free_irq(dev->irq, hw_priv->dev);

		/* Reclaim all in-flight packets and descriptors. */
		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);

		/* Clean out static MAC table when the switch is shutdown. */
		if (hw->features & STP_SUPPORT)
			sw_clr_sta_mac_table(hw);
	}

	return 0;
}
  4585. static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
  4586. {
  4587. if (hw->ksz_switch) {
  4588. u32 data;
  4589. data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  4590. if (hw->features & RX_HUGE_FRAME)
  4591. data |= SWITCH_HUGE_PACKET;
  4592. else
  4593. data &= ~SWITCH_HUGE_PACKET;
  4594. writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
  4595. }
  4596. if (hw->features & RX_HUGE_FRAME) {
  4597. hw->rx_cfg |= DMA_RX_ERROR;
  4598. hw_priv->dev_rcv = dev_rcv_special;
  4599. } else {
  4600. hw->rx_cfg &= ~DMA_RX_ERROR;
  4601. if (hw->dev_count > 1)
  4602. hw_priv->dev_rcv = port_rcv_packets;
  4603. else
  4604. hw_priv->dev_rcv = dev_rcv_packets;
  4605. }
  4606. }
/*
 * prepare_hardware - one-time hardware setup when the first interface opens
 * @dev: Network device that will own the interrupt line.
 *
 * Requests the (shared) interrupt, sets up the receive/transmit tasklets,
 * clears the software promiscuous/multicast bookkeeping, then resets the
 * chip and programs descriptor ring bases, MAC address, huge-frame
 * configuration and receive buffers.  The order is significant: the reset
 * must precede all register programming.
 *
 * Return 0 if successful; otherwise the error code from request_irq().
 */
static int prepare_hardware(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int rc = 0;

	/* Remember the network device that requests interrupts. */
	hw_priv->dev = dev;
	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;
	tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
	tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);

	/* Fresh open: no interface has promiscuous/multicast enabled yet. */
	hw->promiscuous = 0;
	hw->all_multi = 0;
	hw->multi_list_size = 0;

	hw_reset(hw);

	hw_set_desc_base(hw,
		hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
	hw_set_addr(hw);
	hw_cfg_huge_frame(hw_priv, hw);
	ksz_init_rx_buffers(hw_priv);
	return 0;
}
  4631. static void set_media_state(struct net_device *dev, int media_state)
  4632. {
  4633. struct dev_priv *priv = netdev_priv(dev);
  4634. if (media_state == priv->media_state)
  4635. netif_carrier_on(dev);
  4636. else
  4637. netif_carrier_off(dev);
  4638. netif_info(priv, link, dev, "link %s\n",
  4639. media_state == priv->media_state ? "on" : "off");
  4640. }
  4641. /**
  4642. * netdev_open - open network device
  4643. * @dev: Network device.
  4644. *
  4645. * This function process the open operation of network device. This is caused
  4646. * by the user command "ifconfig ethX up."
  4647. *
  4648. * Return 0 if successful; otherwise an error code indicating failure.
  4649. */
static int netdev_open(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	unsigned long next_jiffies;
	int i;
	int p;
	int rc = 0;

	/* First MIB read is scheduled 2 seconds from now. */
	next_jiffies = jiffies + HZ * 2;
	priv->multicast = 0;
	priv->promiscuous = 0;

	/* Reset device statistics. */
	memset(&dev->stats, 0, sizeof(struct net_device_stats));
	memset((void *) port->counter, 0,
		(sizeof(u64) * OID_COUNTER_LAST));

	/* Only the first interface to open sets up the shared hardware. */
	if (!(hw_priv->opened)) {
		rc = prepare_hardware(dev);
		if (rc)
			return rc;
		for (i = 0; i < hw->mib_port_cnt; i++) {
			/* Stagger per-port MIB reads 1 second apart. */
			next_jiffies += HZ * 1;
			hw_priv->counter[i].time = next_jiffies;
			hw->port_mib[i].state = media_disconnected;
			port_init_cnt(hw, i);
		}
		if (hw->ksz_switch)
			hw->port_mib[HOST_PORT].state = media_connected;
		else {
			/* No switch: program Wake-on-LAN defaults. */
			hw_add_wol_bcast(hw);
			hw_cfg_wol_pme(hw, 0);
			hw_clr_wol_pme_status(&hw_priv->hw);
		}
	}
	port_set_power_saving(port, false);

	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/*
		 * Initialize to invalid value so that link detection
		 * is done.
		 */
		hw->port_info[p].partner = 0xFF;
		hw->port_info[p].state = media_disconnected;
	}

	/* Need to open the port in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
		if (port->first_port > 0)
			hw_add_addr(hw, dev->dev_addr);
	}

	port_get_link_speed(port);
	if (port->force_link)
		port_force_link_speed(port);
	else
		port_set_link_speed(port);

	/* Enable DMA and interrupts once for all interfaces. */
	if (!(hw_priv->opened)) {
		hw_setup_intr(hw);
		hw_enable(hw);
		hw_ena_intr(hw);

		if (hw->mib_port_cnt)
			ksz_start_timer(&hw_priv->mib_timer_info,
				hw_priv->mib_timer_info.period);
	}

	hw_priv->opened++;

	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);

	priv->media_state = port->linked->state;

	set_media_state(dev, media_connected);
	netif_start_queue(dev);

	return 0;
}
  4721. /* RX errors = rx_errors */
  4722. /* RX dropped = rx_dropped */
  4723. /* RX overruns = rx_fifo_errors */
  4724. /* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
  4725. /* TX errors = tx_errors */
  4726. /* TX dropped = tx_dropped */
  4727. /* TX overruns = tx_fifo_errors */
  4728. /* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
  4729. /* collisions = collisions */
  4730. /**
  4731. * netdev_query_statistics - query network device statistics
  4732. * @dev: Network device.
  4733. *
  4734. * This function returns the statistics of the network device. The device
  4735. * needs not be opened.
  4736. *
  4737. * Return network device statistics.
  4738. */
  4739. static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
  4740. {
  4741. struct dev_priv *priv = netdev_priv(dev);
  4742. struct ksz_port *port = &priv->port;
  4743. struct ksz_hw *hw = &priv->adapter->hw;
  4744. struct ksz_port_mib *mib;
  4745. int i;
  4746. int p;
  4747. dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
  4748. dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
  4749. /* Reset to zero to add count later. */
  4750. dev->stats.multicast = 0;
  4751. dev->stats.collisions = 0;
  4752. dev->stats.rx_length_errors = 0;
  4753. dev->stats.rx_crc_errors = 0;
  4754. dev->stats.rx_frame_errors = 0;
  4755. dev->stats.tx_window_errors = 0;
  4756. for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
  4757. mib = &hw->port_mib[p];
  4758. dev->stats.multicast += (unsigned long)
  4759. mib->counter[MIB_COUNTER_RX_MULTICAST];
  4760. dev->stats.collisions += (unsigned long)
  4761. mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
  4762. dev->stats.rx_length_errors += (unsigned long)(
  4763. mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
  4764. mib->counter[MIB_COUNTER_RX_FRAGMENT] +
  4765. mib->counter[MIB_COUNTER_RX_OVERSIZE] +
  4766. mib->counter[MIB_COUNTER_RX_JABBER]);
  4767. dev->stats.rx_crc_errors += (unsigned long)
  4768. mib->counter[MIB_COUNTER_RX_CRC_ERR];
  4769. dev->stats.rx_frame_errors += (unsigned long)(
  4770. mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
  4771. mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
  4772. dev->stats.tx_window_errors += (unsigned long)
  4773. mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
  4774. }
  4775. return &dev->stats;
  4776. }
  4777. /**
  4778. * netdev_set_mac_address - set network device MAC address
  4779. * @dev: Network device.
  4780. * @addr: Buffer of MAC address.
  4781. *
  4782. * This function is used to set the MAC address of the network device.
  4783. *
  4784. * Return 0 to indicate success.
  4785. */
  4786. static int netdev_set_mac_address(struct net_device *dev, void *addr)
  4787. {
  4788. struct dev_priv *priv = netdev_priv(dev);
  4789. struct dev_info *hw_priv = priv->adapter;
  4790. struct ksz_hw *hw = &hw_priv->hw;
  4791. struct sockaddr *mac = addr;
  4792. uint interrupt;
  4793. if (priv->port.first_port > 0)
  4794. hw_del_addr(hw, dev->dev_addr);
  4795. else {
  4796. hw->mac_override = 1;
  4797. memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
  4798. }
  4799. eth_hw_addr_set(dev, mac->sa_data);
  4800. interrupt = hw_block_intr(hw);
  4801. if (priv->port.first_port > 0)
  4802. hw_add_addr(hw, dev->dev_addr);
  4803. else
  4804. hw_set_addr(hw);
  4805. hw_restore_intr(hw, interrupt);
  4806. return 0;
  4807. }
  4808. static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
  4809. struct ksz_hw *hw, int promiscuous)
  4810. {
  4811. if (promiscuous != priv->promiscuous) {
  4812. u8 prev_state = hw->promiscuous;
  4813. if (promiscuous)
  4814. ++hw->promiscuous;
  4815. else
  4816. --hw->promiscuous;
  4817. priv->promiscuous = promiscuous;
  4818. /* Turn on/off promiscuous mode. */
  4819. if (hw->promiscuous <= 1 && prev_state <= 1)
  4820. hw_set_promiscuous(hw, hw->promiscuous);
  4821. /*
  4822. * Port is not in promiscuous mode, meaning it is released
  4823. * from the bridge.
  4824. */
  4825. if ((hw->features & STP_SUPPORT) && !promiscuous &&
  4826. netif_is_bridge_port(dev)) {
  4827. struct ksz_switch *sw = hw->ksz_switch;
  4828. int port = priv->port.first_port;
  4829. port_set_stp_state(hw, port, STP_STATE_DISABLED);
  4830. port = 1 << port;
  4831. if (sw->member & port) {
  4832. sw->member &= ~port;
  4833. bridge_change(hw);
  4834. }
  4835. }
  4836. }
  4837. }
  4838. static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
  4839. int multicast)
  4840. {
  4841. if (multicast != priv->multicast) {
  4842. u8 all_multi = hw->all_multi;
  4843. if (multicast)
  4844. ++hw->all_multi;
  4845. else
  4846. --hw->all_multi;
  4847. priv->multicast = multicast;
  4848. /* Turn on/off all multicast mode. */
  4849. if (hw->all_multi <= 1 && all_multi <= 1)
  4850. hw_set_multicast(hw, hw->all_multi);
  4851. }
  4852. }
/**
 * netdev_set_rx_mode
 * @dev: Network device.
 *
 * This routine is used to set multicast addresses or put the network device
 * into promiscuous mode.
 */
static void netdev_set_rx_mode(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct netdev_hw_addr *ha;
	int multicast = (dev->flags & IFF_ALLMULTI);

	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));

	/*
	 * With multiple interfaces sharing the chip, any multicast use
	 * forces all-multicast because the hash table is shared.
	 */
	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(priv, hw, multicast);

	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;

	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;

		/* List too big to support so turn on all multicast mode. */
		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
			/*
			 * multi_list_size == MAX_MULTICAST_LIST marks that
			 * this path already holds an all_multi reference;
			 * take it only once.
			 */
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}

		netdev_for_each_mc_addr(ha, dev) {
			if (i >= MAX_MULTICAST_LIST)
				break;
			memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
		}
		hw->multi_list_size = (u8) i;
		hw_set_grp_addr(hw);
	} else {
		/* Drop the all_multi reference taken by the overflow path. */
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
		hw_clr_multicast(hw);
	}
}
  4901. static int netdev_change_mtu(struct net_device *dev, int new_mtu)
  4902. {
  4903. struct dev_priv *priv = netdev_priv(dev);
  4904. struct dev_info *hw_priv = priv->adapter;
  4905. struct ksz_hw *hw = &hw_priv->hw;
  4906. int hw_mtu;
  4907. if (netif_running(dev))
  4908. return -EBUSY;
  4909. /* Cannot use different MTU in multiple device interfaces mode. */
  4910. if (hw->dev_count > 1)
  4911. if (dev != hw_priv->dev)
  4912. return 0;
  4913. hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
  4914. if (hw_mtu > REGULAR_RX_BUF_SIZE) {
  4915. hw->features |= RX_HUGE_FRAME;
  4916. hw_mtu = MAX_RX_BUF_SIZE;
  4917. } else {
  4918. hw->features &= ~RX_HUGE_FRAME;
  4919. hw_mtu = REGULAR_RX_BUF_SIZE;
  4920. }
  4921. hw_mtu = (hw_mtu + 3) & ~3;
  4922. hw_priv->mtu = hw_mtu;
  4923. dev->mtu = new_mtu;
  4924. return 0;
  4925. }
/**
 * netdev_ioctl - I/O control processing
 * @dev: Network device.
 * @ifr: Interface request structure.
 * @cmd: I/O control code.
 *
 * This function is used to process I/O control calls.
 *
 * Return 0 to indicate success.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int result = 0;
	struct mii_ioctl_data *data = if_mii(ifr);

	/* Serialize PHY accesses; the wait may be interrupted by a signal. */
	if (down_interruptible(&priv->proc_sem))
		return -ERESTARTSYS;

	switch (cmd) {
	/* Get address of MII PHY in use. */
	case SIOCGMIIPHY:
		data->phy_id = priv->id;
		fallthrough;

	/* Read MII PHY register. */
	case SIOCGMIIREG:
		/* Only PHY registers 0-5 are exposed through this path. */
		if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_r_phy(hw, port->linked->port_id, data->reg_num,
				&data->val_out);
		break;

	/* Write MII PHY register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			result = -EPERM;
		else if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_w_phy(hw, port->linked->port_id, data->reg_num,
				data->val_in);
		break;

	default:
		result = -EOPNOTSUPP;
	}

	up(&priv->proc_sem);

	return result;
}
  4975. /*
  4976. * MII support
  4977. */
  4978. /**
  4979. * mdio_read - read PHY register
  4980. * @dev: Network device.
  4981. * @phy_id: The PHY id.
  4982. * @reg_num: The register number.
  4983. *
  4984. * This function returns the PHY register value.
  4985. *
  4986. * Return the register value.
  4987. */
  4988. static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
  4989. {
  4990. struct dev_priv *priv = netdev_priv(dev);
  4991. struct ksz_port *port = &priv->port;
  4992. struct ksz_hw *hw = port->hw;
  4993. u16 val_out;
  4994. hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
  4995. return val_out;
  4996. }
  4997. /**
  4998. * mdio_write - set PHY register
  4999. * @dev: Network device.
  5000. * @phy_id: The PHY id.
  5001. * @reg_num: The register number.
  5002. * @val: The register value.
  5003. *
  5004. * This procedure sets the PHY register value.
  5005. */
  5006. static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
  5007. {
  5008. struct dev_priv *priv = netdev_priv(dev);
  5009. struct ksz_port *port = &priv->port;
  5010. struct ksz_hw *hw = port->hw;
  5011. int i;
  5012. int pi;
  5013. for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
  5014. hw_w_phy(hw, pi, reg_num << 1, val);
  5015. }
  5016. /*
  5017. * ethtool support
  5018. */
  5019. #define EEPROM_SIZE 0x40
  5020. static u16 eeprom_data[EEPROM_SIZE] = { 0 };
  5021. #define ADVERTISED_ALL \
  5022. (ADVERTISED_10baseT_Half | \
  5023. ADVERTISED_10baseT_Full | \
  5024. ADVERTISED_100baseT_Half | \
  5025. ADVERTISED_100baseT_Full)
  5026. /* These functions use the MII functions in mii.c. */
  5027. /**
  5028. * netdev_get_link_ksettings - get network device settings
  5029. * @dev: Network device.
  5030. * @cmd: Ethtool command.
  5031. *
  5032. * This function queries the PHY and returns its state in the ethtool command.
  5033. *
  5034. * Return 0 if successful; otherwise an error code.
  5035. */
  5036. static int netdev_get_link_ksettings(struct net_device *dev,
  5037. struct ethtool_link_ksettings *cmd)
  5038. {
  5039. struct dev_priv *priv = netdev_priv(dev);
  5040. struct dev_info *hw_priv = priv->adapter;
  5041. mutex_lock(&hw_priv->lock);
  5042. mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
  5043. ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
  5044. mutex_unlock(&hw_priv->lock);
  5045. /* Save advertised settings for workaround in next function. */
  5046. ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
  5047. cmd->link_modes.advertising);
  5048. return 0;
  5049. }
/**
 * netdev_set_link_ksettings - set network device settings
 * @dev: Network device.
 * @cmd: Ethtool command.
 *
 * This function sets the PHY according to the ethtool command.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_link_ksettings(struct net_device *dev,
	const struct ethtool_link_ksettings *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ethtool_link_ksettings copy_cmd;
	u32 speed = cmd->base.speed;
	u32 advertising;
	int rc;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
		cmd->link_modes.advertising);

	/*
	 * ethtool utility does not change advertised setting if auto
	 * negotiation is not specified explicitly.
	 * Work around it: when the advertised mask is unchanged from what
	 * the last get call reported, rebuild it from speed/duplex.
	 */
	if (cmd->base.autoneg && priv->advertising == advertising) {
		advertising |= ADVERTISED_ALL;
		if (10 == speed)
			advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_100baseT_Half);
		else if (100 == speed)
			advertising &=
				~(ADVERTISED_10baseT_Full |
				ADVERTISED_10baseT_Half);
		if (0 == cmd->base.duplex)
			advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Full);
		else if (1 == cmd->base.duplex)
			advertising &=
				~(ADVERTISED_100baseT_Half |
				ADVERTISED_10baseT_Half);
	}
	mutex_lock(&hw_priv->lock);
	if (cmd->base.autoneg &&
	    (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) {
		/* Full auto-negotiation: let the PHY decide everything. */
		port->duplex = 0;
		port->speed = 0;
		port->force_link = 0;
	} else {
		/* port->duplex is 1-based (1 = half, 2 = full). */
		port->duplex = cmd->base.duplex + 1;
		if (1000 != speed)
			port->speed = speed;
		if (cmd->base.autoneg)
			port->force_link = 0;
		else
			port->force_link = 1;
	}

	/* Hand the (possibly adjusted) advertising mask to the MII layer. */
	memcpy(&copy_cmd, cmd, sizeof(copy_cmd));
	ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising,
						advertising);
	rc = mii_ethtool_set_link_ksettings(
		&priv->mii_if,
		(const struct ethtool_link_ksettings *)&copy_cmd);
	mutex_unlock(&hw_priv->lock);
	return rc;
}
  5118. /**
  5119. * netdev_nway_reset - restart auto-negotiation
  5120. * @dev: Network device.
  5121. *
  5122. * This function restarts the PHY for auto-negotiation.
  5123. *
  5124. * Return 0 if successful; otherwise an error code.
  5125. */
  5126. static int netdev_nway_reset(struct net_device *dev)
  5127. {
  5128. struct dev_priv *priv = netdev_priv(dev);
  5129. struct dev_info *hw_priv = priv->adapter;
  5130. int rc;
  5131. mutex_lock(&hw_priv->lock);
  5132. rc = mii_nway_restart(&priv->mii_if);
  5133. mutex_unlock(&hw_priv->lock);
  5134. return rc;
  5135. }
  5136. /**
  5137. * netdev_get_link - get network device link status
  5138. * @dev: Network device.
  5139. *
  5140. * This function gets the link status from the PHY.
  5141. *
  5142. * Return true if PHY is linked and false otherwise.
  5143. */
  5144. static u32 netdev_get_link(struct net_device *dev)
  5145. {
  5146. struct dev_priv *priv = netdev_priv(dev);
  5147. int rc;
  5148. rc = mii_link_ok(&priv->mii_if);
  5149. return rc;
  5150. }
  5151. /**
  5152. * netdev_get_drvinfo - get network driver information
  5153. * @dev: Network device.
  5154. * @info: Ethtool driver info data structure.
  5155. *
  5156. * This procedure returns the driver information.
  5157. */
  5158. static void netdev_get_drvinfo(struct net_device *dev,
  5159. struct ethtool_drvinfo *info)
  5160. {
  5161. struct dev_priv *priv = netdev_priv(dev);
  5162. struct dev_info *hw_priv = priv->adapter;
  5163. strscpy(info->driver, DRV_NAME, sizeof(info->driver));
  5164. strscpy(info->version, DRV_VERSION, sizeof(info->version));
  5165. strscpy(info->bus_info, pci_name(hw_priv->pdev),
  5166. sizeof(info->bus_info));
  5167. }
/* Register address ranges included in the ethtool register dump. */
static struct hw_regs {
	int start;	/* first register offset (inclusive) */
	int end;	/* last register offset (exclusive) */
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P, KS8842_SGCR7_P },
	{ KS8842_MACAR1_P, KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P, KS8842_P3ERCR_P },
	{ 0, 0 }	/* sentinel: end <= start terminates the scan */
};
  5180. /**
  5181. * netdev_get_regs_len - get length of register dump
  5182. * @dev: Network device.
  5183. *
  5184. * This function returns the length of the register dump.
  5185. *
  5186. * Return length of the register dump.
  5187. */
  5188. static int netdev_get_regs_len(struct net_device *dev)
  5189. {
  5190. struct hw_regs *range = hw_regs_range;
  5191. int regs_len = 0x10 * sizeof(u32);
  5192. while (range->end > range->start) {
  5193. regs_len += (range->end - range->start + 3) / 4 * 4;
  5194. range++;
  5195. }
  5196. return regs_len;
  5197. }
  5198. /**
  5199. * netdev_get_regs - get register dump
  5200. * @dev: Network device.
  5201. * @regs: Ethtool registers data structure.
  5202. * @ptr: Buffer to store the register values.
  5203. *
  5204. * This procedure dumps the register values in the provided buffer.
  5205. */
  5206. static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
  5207. void *ptr)
  5208. {
  5209. struct dev_priv *priv = netdev_priv(dev);
  5210. struct dev_info *hw_priv = priv->adapter;
  5211. struct ksz_hw *hw = &hw_priv->hw;
  5212. int *buf = (int *) ptr;
  5213. struct hw_regs *range = hw_regs_range;
  5214. int len;
  5215. mutex_lock(&hw_priv->lock);
  5216. regs->version = 0;
  5217. for (len = 0; len < 0x40; len += 4) {
  5218. pci_read_config_dword(hw_priv->pdev, len, buf);
  5219. buf++;
  5220. }
  5221. while (range->end > range->start) {
  5222. for (len = range->start; len < range->end; len += 4) {
  5223. *buf = readl(hw->io + len);
  5224. buf++;
  5225. }
  5226. range++;
  5227. }
  5228. mutex_unlock(&hw_priv->lock);
  5229. }
  5230. #define WOL_SUPPORT \
  5231. (WAKE_PHY | WAKE_MAGIC | \
  5232. WAKE_UCAST | WAKE_MCAST | \
  5233. WAKE_BCAST | WAKE_ARP)
  5234. /**
  5235. * netdev_get_wol - get Wake-on-LAN support
  5236. * @dev: Network device.
  5237. * @wol: Ethtool Wake-on-LAN data structure.
  5238. *
  5239. * This procedure returns Wake-on-LAN support.
  5240. */
  5241. static void netdev_get_wol(struct net_device *dev,
  5242. struct ethtool_wolinfo *wol)
  5243. {
  5244. struct dev_priv *priv = netdev_priv(dev);
  5245. struct dev_info *hw_priv = priv->adapter;
  5246. wol->supported = hw_priv->wol_support;
  5247. wol->wolopts = hw_priv->wol_enable;
  5248. memset(&wol->sopass, 0, sizeof(wol->sopass));
  5249. }
  5250. /**
  5251. * netdev_set_wol - set Wake-on-LAN support
  5252. * @dev: Network device.
  5253. * @wol: Ethtool Wake-on-LAN data structure.
  5254. *
  5255. * This function sets Wake-on-LAN support.
  5256. *
  5257. * Return 0 if successful; otherwise an error code.
  5258. */
  5259. static int netdev_set_wol(struct net_device *dev,
  5260. struct ethtool_wolinfo *wol)
  5261. {
  5262. struct dev_priv *priv = netdev_priv(dev);
  5263. struct dev_info *hw_priv = priv->adapter;
  5264. /* Need to find a way to retrieve the device IP address. */
  5265. static const u8 net_addr[] = { 192, 168, 1, 1 };
  5266. if (wol->wolopts & ~hw_priv->wol_support)
  5267. return -EINVAL;
  5268. hw_priv->wol_enable = wol->wolopts;
  5269. /* Link wakeup cannot really be disabled. */
  5270. if (wol->wolopts)
  5271. hw_priv->wol_enable |= WAKE_PHY;
  5272. hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
  5273. return 0;
  5274. }
  5275. /**
  5276. * netdev_get_msglevel - get debug message level
  5277. * @dev: Network device.
  5278. *
  5279. * This function returns current debug message level.
  5280. *
  5281. * Return current debug message flags.
  5282. */
  5283. static u32 netdev_get_msglevel(struct net_device *dev)
  5284. {
  5285. struct dev_priv *priv = netdev_priv(dev);
  5286. return priv->msg_enable;
  5287. }
  5288. /**
  5289. * netdev_set_msglevel - set debug message level
  5290. * @dev: Network device.
  5291. * @value: Debug message flags.
  5292. *
  5293. * This procedure sets debug message level.
  5294. */
  5295. static void netdev_set_msglevel(struct net_device *dev, u32 value)
  5296. {
  5297. struct dev_priv *priv = netdev_priv(dev);
  5298. priv->msg_enable = value;
  5299. }
  5300. /**
  5301. * netdev_get_eeprom_len - get EEPROM length
  5302. * @dev: Network device.
  5303. *
  5304. * This function returns the length of the EEPROM.
  5305. *
  5306. * Return length of the EEPROM.
  5307. */
  5308. static int netdev_get_eeprom_len(struct net_device *dev)
  5309. {
  5310. return EEPROM_SIZE * 2;
  5311. }
  5312. #define EEPROM_MAGIC 0x10A18842
/**
 * netdev_get_eeprom - get EEPROM data
 * @dev: Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data: Buffer to store the EEPROM data.
 *
 * This function dumps the EEPROM data in the provided buffer.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_get_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u8 *eeprom_byte = (u8 *) eeprom_data;
	int i;
	int len;

	/*
	 * Refresh the cached eeprom_data[] with every 16-bit word that
	 * overlaps the requested byte range, then copy out bytes.
	 * NOTE(review): no local bounds check against EEPROM_SIZE; this
	 * relies on the ethtool core clamping offset+len to
	 * get_eeprom_len() -- confirm.
	 */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	eeprom->magic = EEPROM_MAGIC;
	memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);

	return 0;
}
/**
 * netdev_set_eeprom - write EEPROM data
 * @dev: Network device.
 * @eeprom: Ethtool EEPROM data structure.
 * @data: Data buffer.
 *
 * This function modifies the EEPROM data one byte at a time.  It performs
 * a read-modify-write: the affected words are read from the device, the
 * new bytes are merged in, and only words that actually changed are
 * written back.
 *
 * Return 0 if successful; -EINVAL if the magic value does not match.
 */
static int netdev_set_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u16 eeprom_word[EEPROM_SIZE];
	u8 *eeprom_byte = (u8 *) eeprom_word;
	int i;
	int len;

	/* The caller must present the magic obtained from a prior read. */
	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Refresh the cached words covering the requested byte range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
	memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);

	/* Write back only the words that changed. */
	for (i = 0; i < EEPROM_SIZE; i++)
		if (eeprom_word[i] != eeprom_data[i]) {
			eeprom_data[i] = eeprom_word[i];
			eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
		}

	return 0;
}
  5371. /**
  5372. * netdev_get_pauseparam - get flow control parameters
  5373. * @dev: Network device.
  5374. * @pause: Ethtool PAUSE settings data structure.
  5375. *
  5376. * This procedure returns the PAUSE control flow settings.
  5377. */
  5378. static void netdev_get_pauseparam(struct net_device *dev,
  5379. struct ethtool_pauseparam *pause)
  5380. {
  5381. struct dev_priv *priv = netdev_priv(dev);
  5382. struct dev_info *hw_priv = priv->adapter;
  5383. struct ksz_hw *hw = &hw_priv->hw;
  5384. pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
  5385. if (!hw->ksz_switch) {
  5386. pause->rx_pause =
  5387. (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
  5388. pause->tx_pause =
  5389. (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
  5390. } else {
  5391. pause->rx_pause =
  5392. (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5393. SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
  5394. pause->tx_pause =
  5395. (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
  5396. SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
  5397. }
  5398. }
/**
 * netdev_set_pauseparam - set flow control parameters
 * @dev: Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This function sets the PAUSE control flow settings.  With autoneg the
 * PAUSE_FLOW_CTRL override is cleared and flow control is re-negotiated;
 * without it the requested rx/tx settings are forced directly.
 *
 * Return 0 to indicate success.
 */
static int netdev_set_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;

	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		/* Advertise flow control only when either direction is on. */
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, 1);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, 1);
		}
		/* Restart link negotiation with the new advertisement. */
		port_set_link_speed(port);
	} else {
		/* Forced mode: apply the requested settings directly. */
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);

	return 0;
}
  5444. /**
  5445. * netdev_get_ringparam - get tx/rx ring parameters
  5446. * @dev: Network device.
  5447. * @ring: Ethtool RING settings data structure.
  5448. * @kernel_ring: Ethtool external RING settings data structure.
  5449. * @extack: Netlink handle.
  5450. *
  5451. * This procedure returns the TX/RX ring settings.
  5452. */
  5453. static void netdev_get_ringparam(struct net_device *dev,
  5454. struct ethtool_ringparam *ring,
  5455. struct kernel_ethtool_ringparam *kernel_ring,
  5456. struct netlink_ext_ack *extack)
  5457. {
  5458. struct dev_priv *priv = netdev_priv(dev);
  5459. struct dev_info *hw_priv = priv->adapter;
  5460. struct ksz_hw *hw = &hw_priv->hw;
  5461. ring->tx_max_pending = (1 << 9);
  5462. ring->tx_pending = hw->tx_desc_info.alloc;
  5463. ring->rx_max_pending = (1 << 9);
  5464. ring->rx_pending = hw->rx_desc_info.alloc;
  5465. }
#define STATS_LEN	(TOTAL_PORT_COUNTER_NUM)

/*
 * Names reported for each per-port MIB counter by `ethtool -S`.
 * The order must match the hardware MIB counter layout used by
 * netdev_get_ethtool_stats().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "rx_discards" },
	{ "tx_discards" },
};
  5505. /**
  5506. * netdev_get_strings - get statistics identity strings
  5507. * @dev: Network device.
  5508. * @stringset: String set identifier.
  5509. * @buf: Buffer to store the strings.
  5510. *
  5511. * This procedure returns the strings used to identify the statistics.
  5512. */
  5513. static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
  5514. {
  5515. struct dev_priv *priv = netdev_priv(dev);
  5516. struct dev_info *hw_priv = priv->adapter;
  5517. struct ksz_hw *hw = &hw_priv->hw;
  5518. if (ETH_SS_STATS == stringset)
  5519. memcpy(buf, &ethtool_stats_keys,
  5520. ETH_GSTRING_LEN * hw->mib_cnt);
  5521. }
  5522. /**
  5523. * netdev_get_sset_count - get statistics size
  5524. * @dev: Network device.
  5525. * @sset: The statistics set number.
  5526. *
  5527. * This function returns the size of the statistics to be reported.
  5528. *
  5529. * Return size of the statistics to be reported.
  5530. */
  5531. static int netdev_get_sset_count(struct net_device *dev, int sset)
  5532. {
  5533. struct dev_priv *priv = netdev_priv(dev);
  5534. struct dev_info *hw_priv = priv->adapter;
  5535. struct ksz_hw *hw = &hw_priv->hw;
  5536. switch (sset) {
  5537. case ETH_SS_STATS:
  5538. return hw->mib_cnt;
  5539. default:
  5540. return -EOPNOTSUPP;
  5541. }
  5542. }
  5543. /**
  5544. * netdev_get_ethtool_stats - get network device statistics
  5545. * @dev: Network device.
  5546. * @stats: Ethtool statistics data structure.
  5547. * @data: Buffer to store the statistics.
  5548. *
  5549. * This procedure returns the statistics.
  5550. */
  5551. static void netdev_get_ethtool_stats(struct net_device *dev,
  5552. struct ethtool_stats *stats, u64 *data)
  5553. {
  5554. struct dev_priv *priv = netdev_priv(dev);
  5555. struct dev_info *hw_priv = priv->adapter;
  5556. struct ksz_hw *hw = &hw_priv->hw;
  5557. struct ksz_port *port = &priv->port;
  5558. int n_stats = stats->n_stats;
  5559. int i;
  5560. int n;
  5561. int p;
  5562. u64 counter[TOTAL_PORT_COUNTER_NUM];
  5563. mutex_lock(&hw_priv->lock);
  5564. n = SWITCH_PORT_NUM;
  5565. for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
  5566. if (media_connected == hw->port_mib[p].state) {
  5567. hw_priv->counter[p].read = 1;
  5568. /* Remember first port that requests read. */
  5569. if (n == SWITCH_PORT_NUM)
  5570. n = p;
  5571. }
  5572. }
  5573. mutex_unlock(&hw_priv->lock);
  5574. if (n < SWITCH_PORT_NUM)
  5575. schedule_work(&hw_priv->mib_read);
  5576. if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
  5577. p = n;
  5578. wait_event_interruptible_timeout(
  5579. hw_priv->counter[p].counter,
  5580. 2 == hw_priv->counter[p].read,
  5581. HZ * 1);
  5582. } else
  5583. for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
  5584. if (0 == i) {
  5585. wait_event_interruptible_timeout(
  5586. hw_priv->counter[p].counter,
  5587. 2 == hw_priv->counter[p].read,
  5588. HZ * 2);
  5589. } else if (hw->port_mib[p].cnt_ptr) {
  5590. wait_event_interruptible_timeout(
  5591. hw_priv->counter[p].counter,
  5592. 2 == hw_priv->counter[p].read,
  5593. HZ * 1);
  5594. }
  5595. }
  5596. get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
  5597. n = hw->mib_cnt;
  5598. if (n > n_stats)
  5599. n = n_stats;
  5600. n_stats -= n;
  5601. for (i = 0; i < n; i++)
  5602. *data++ = counter[i];
  5603. }
  5604. /**
  5605. * netdev_set_features - set receive checksum support
  5606. * @dev: Network device.
  5607. * @features: New device features (offloads).
  5608. *
  5609. * This function sets receive checksum support setting.
  5610. *
  5611. * Return 0 if successful; otherwise an error code.
  5612. */
  5613. static int netdev_set_features(struct net_device *dev,
  5614. netdev_features_t features)
  5615. {
  5616. struct dev_priv *priv = netdev_priv(dev);
  5617. struct dev_info *hw_priv = priv->adapter;
  5618. struct ksz_hw *hw = &hw_priv->hw;
  5619. mutex_lock(&hw_priv->lock);
  5620. /* see note in hw_setup() */
  5621. if (features & NETIF_F_RXCSUM)
  5622. hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
  5623. else
  5624. hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
  5625. if (hw->enabled)
  5626. writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
  5627. mutex_unlock(&hw_priv->lock);
  5628. return 0;
  5629. }
/* ethtool operations exported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_drvinfo		= netdev_get_drvinfo,
	.get_regs_len		= netdev_get_regs_len,
	.get_regs		= netdev_get_regs,
	.get_wol		= netdev_get_wol,
	.set_wol		= netdev_set_wol,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_eeprom_len		= netdev_get_eeprom_len,
	.get_eeprom		= netdev_get_eeprom,
	.set_eeprom		= netdev_set_eeprom,
	.get_pauseparam		= netdev_get_pauseparam,
	.set_pauseparam		= netdev_set_pauseparam,
	.get_ringparam		= netdev_get_ringparam,
	.get_strings		= netdev_get_strings,
	.get_sset_count		= netdev_get_sset_count,
	.get_ethtool_stats	= netdev_get_ethtool_stats,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
  5652. /*
  5653. * Hardware monitoring
  5654. */
  5655. static void update_link(struct net_device *dev, struct dev_priv *priv,
  5656. struct ksz_port *port)
  5657. {
  5658. if (priv->media_state != port->linked->state) {
  5659. priv->media_state = port->linked->state;
  5660. if (netif_running(dev))
  5661. set_media_state(dev, media_connected);
  5662. }
  5663. }
/**
 * mib_read_work - read MIB counters from all monitored ports
 * @work: Work structure embedded in struct dev_info (mib_read).
 *
 * For each port this advances one of three states: continue/start a counter
 * read (read == 1 or a read in progress), schedule a periodic read when the
 * port's timestamp expires, or request one final read after link loss.
 * When a port's read completes, read is set to 2 and waiters on the port's
 * wait queue (see netdev_get_ethtool_stats()) are woken.
 */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	unsigned long next_jiffies;
	struct ksz_port_mib *mib;
	int i;

	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];

		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {

			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;

			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;

			/* Stagger the next periodic read across ports. */
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;

		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;

			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}
  5701. static void mib_monitor(struct timer_list *t)
  5702. {
  5703. struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
  5704. mib_read_work(&hw_priv->mib_read);
  5705. /* This is used to verify Wake-on-LAN is working. */
  5706. if (hw_priv->pme_wait) {
  5707. if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
  5708. hw_clr_wol_pme_status(&hw_priv->hw);
  5709. hw_priv->pme_wait = 0;
  5710. }
  5711. } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
  5712. /* PME is asserted. Wait 2 seconds to clear it. */
  5713. hw_priv->pme_wait = jiffies + HZ * 2;
  5714. }
  5715. ksz_update_timer(&hw_priv->mib_timer_info);
  5716. }
  5717. /**
  5718. * dev_monitor - periodic monitoring
  5719. * @t: timer list containing a network device pointer.
  5720. *
  5721. * This routine is run in a kernel timer to monitor the network device.
  5722. */
  5723. static void dev_monitor(struct timer_list *t)
  5724. {
  5725. struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
  5726. struct net_device *dev = priv->mii_if.dev;
  5727. struct dev_info *hw_priv = priv->adapter;
  5728. struct ksz_hw *hw = &hw_priv->hw;
  5729. struct ksz_port *port = &priv->port;
  5730. if (!(hw->features & LINK_INT_WORKING))
  5731. port_get_link_speed(port);
  5732. update_link(dev, priv, port);
  5733. ksz_update_timer(&priv->monitor_timer_info);
  5734. }
  5735. /*
  5736. * Linux network device interface functions
  5737. */
  5738. /* Driver exported variables */
  5739. static int msg_enable;
  5740. static char *macaddr = ":";
  5741. static char *mac1addr = ":";
  5742. /*
  5743. * This enables multiple network device mode for KSZ8842, which contains a
  5744. * switch with two physical ports. Some users like to take control of the
  5745. * ports for running Spanning Tree Protocol. The driver will create an
  5746. * additional eth? device for the other port.
  5747. *
  5748. * Some limitations are the network devices cannot have different MTU and
  5749. * multicast hash tables.
  5750. */
  5751. static int multi_dev;
  5752. /*
  5753. * As most users select multiple network device mode to use Spanning Tree
  5754. * Protocol, this enables a feature in which most unicast and multicast packets
  5755. * are forwarded inside the switch and not passed to the host. Only packets
  5756. * that need the host's attention are passed to it. This prevents the host
  5757. * wasting CPU time to examine each and every incoming packets and do the
  5758. * forwarding itself.
  5759. *
  5760. * As the hack requires the private bridge header, the driver cannot compile
  5761. * with just the kernel headers.
  5762. *
  5763. * Enabling STP support also turns on multiple network device mode.
  5764. */
  5765. static int stp;
  5766. /*
  5767. * This enables fast aging in the KSZ8842 switch. Not sure what situation
  5768. * needs that. However, fast aging is used to flush the dynamic MAC table when
  5769. * STP support is enabled.
  5770. */
  5771. static int fast_aging;
  5772. /**
  5773. * netdev_init - initialize network device.
  5774. * @dev: Network device.
  5775. *
  5776. * This function initializes the network device.
  5777. *
  5778. * Return 0 if successful; otherwise an error code indicating failure.
  5779. */
  5780. static int __init netdev_init(struct net_device *dev)
  5781. {
  5782. struct dev_priv *priv = netdev_priv(dev);
  5783. /* 500 ms timeout */
  5784. ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
  5785. dev_monitor);
  5786. /* 500 ms timeout */
  5787. dev->watchdog_timeo = HZ / 2;
  5788. dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
  5789. /*
  5790. * Hardware does not really support IPv6 checksum generation, but
  5791. * driver actually runs faster with this on.
  5792. */
  5793. dev->hw_features |= NETIF_F_IPV6_CSUM;
  5794. dev->features |= dev->hw_features;
  5795. sema_init(&priv->proc_sem, 1);
  5796. priv->mii_if.phy_id_mask = 0x1;
  5797. priv->mii_if.reg_num_mask = 0x7;
  5798. priv->mii_if.dev = dev;
  5799. priv->mii_if.mdio_read = mdio_read;
  5800. priv->mii_if.mdio_write = mdio_write;
  5801. priv->mii_if.phy_id = priv->port.first_port + 1;
  5802. priv->msg_enable = netif_msg_init(msg_enable,
  5803. (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
  5804. return 0;
  5805. }
/* Network device operations exported by this driver. */
static const struct net_device_ops netdev_ops = {
	.ndo_init		= netdev_init,
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_get_stats		= netdev_query_statistics,
	.ndo_start_xmit		= netdev_tx,
	.ndo_tx_timeout		= netdev_tx_timeout,
	.ndo_change_mtu		= netdev_change_mtu,
	.ndo_set_features	= netdev_set_features,
	.ndo_set_mac_address	= netdev_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_set_rx_mode	= netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= netdev_netpoll,
#endif
};
/*
 * netdev_free - unregister (if registered) and free a network device
 * @dev: Network device.
 *
 * NOTE(review): a nonzero watchdog_timeo is used as a marker that the
 * device went through register_netdev() (which runs netdev_init(), where
 * watchdog_timeo is set) — confirm this invariant holds for all callers.
 */
static void netdev_free(struct net_device *dev)
{
	if (dev->watchdog_timeo)
		unregister_netdev(dev);

	free_netdev(dev);
}
/* Per-PCI-device context: shared hardware state plus its net devices. */
struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};

/* Count of net devices created so far; also seeds hw.id in pcidev_init(). */
static int net_device_present;
/*
 * get_mac_addr - parse a "xx:xx:xx:xx:xx:xx" MAC address module parameter
 * @hw_priv: Network device information.
 * @macaddr: String to parse (hex digits separated by ':').
 * @port: MAIN_PORT or OTHER_PORT, selecting which address to set.
 *
 * Hex digits are accumulated into num; a ':' separator or the end of the
 * string (got_num == 2) commits the accumulated byte.  Only when all six
 * bytes were parsed is the MAC override flag set (MAIN_PORT only).
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;
	int j;
	int got_num;
	int num;

	i = j = num = got_num = 0;
	while (j < ETH_ALEN) {
		if (macaddr[i]) {
			int digit;

			got_num = 1;
			digit = hex_to_bin(macaddr[i]);
			if (digit >= 0)
				num = num * 16 + digit;
			else if (':' == macaddr[i])
				got_num = 2;	/* separator ends the byte */
			else
				break;		/* invalid character */
		} else if (got_num)
			got_num = 2;	/* end of string ends the last byte */
		else
			break;		/* empty string */
		if (2 == got_num) {
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				/*
				 * NOTE(review): hw.id is added to the last
				 * address byte once per committed byte (up
				 * to six times) — confirm this is intended.
				 */
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	if (ETH_ALEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
  5876. #define KS884X_DMA_MASK (~0x0UL)
  5877. static void read_other_addr(struct ksz_hw *hw)
  5878. {
  5879. int i;
  5880. u16 data[3];
  5881. struct ksz_switch *sw = hw->ksz_switch;
  5882. for (i = 0; i < 3; i++)
  5883. data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
  5884. if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
  5885. sw->other_addr[5] = (u8) data[0];
  5886. sw->other_addr[4] = (u8)(data[0] >> 8);
  5887. sw->other_addr[3] = (u8) data[1];
  5888. sw->other_addr[2] = (u8)(data[1] >> 8);
  5889. sw->other_addr[1] = (u8) data[2];
  5890. sw->other_addr[0] = (u8)(data[2] >> 8);
  5891. }
  5892. }
#ifndef PCI_VENDOR_ID_MICREL_KS
#define PCI_VENDOR_ID_MICREL_KS		0x16c6
#endif

/*
 * pcidev_init - PCI probe routine
 * @pdev: PCI device.
 * @id: Matched entry of the PCI device id table.
 *
 * Maps the register window, detects the chip (KSZ8841 single port or
 * KSZ8842 two-port switch), allocates shared hardware state and one or
 * more network devices, and registers them.  Errors unwind through the
 * goto-cleanup ladder at the bottom.
 *
 * Return 0 if successful; otherwise a negative error code.
 */
static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[sizeof(version)];
	struct ksz_switch *sw = NULL;

	result = pcim_enable_device(pdev);
	if (result)
		return result;

	result = -ENODEV;

	/* The hardware is driven through 32-bit DMA only. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return result;

	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);

	/* Only memory-mapped BARs are supported. */
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;

	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);

	result = -ENOMEM;

	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (!info)
		goto pcidev_init_dev_err;

	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;

	hw = &hw_priv->hw;

	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;

	/* hw_init() returns the number of ports detected (0 = no chip). */
	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			pr_alert("chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}

	snprintf(banner, sizeof(banner), "%s", version);
	banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);

	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;

	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;

		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;

		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}

		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;

		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;

	hw->parent = hw_priv;

	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;

	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;

	hw_priv->hw.id = net_device_present;

	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);

	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);

	/* Apply the "macaddr" module parameter if one was given. */
	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);

	/* Read MAC address and initialize override address if not overridden. */
	hw_read_addr(hw);

	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}

	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		/* Wake-on-LAN is only supported on the non-switch chip. */
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}

	INIT_WORK(&hw_priv->mib_read, mib_read_work);

	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor);

	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		SET_NETDEV_DEV(dev, &pdev->dev);
		info->netdev[i] = dev;

		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;

		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;

		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];

		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}

		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (MAIN_PORT == i)
			eth_hw_addr_set(dev, hw_priv->hw.override_addr);
		else {
			u8 addr[ETH_ALEN];

			ether_addr_copy(addr, sw->other_addr);

			/* Derive a distinct address if none was provided. */
			if (ether_addr_equal(sw->other_addr,
					     hw->override_addr))
				addr[5] += port->first_port;
			eth_hw_addr_set(dev, addr);
		}

		dev->netdev_ops = &netdev_ops;
		dev->ethtool_ops = &netdev_ethtool_ops;

		/* MTU range: 60 - 1894 */
		dev->min_mtu = ETH_ZLEN;
		dev->max_mtu = MAX_RX_BUF_SIZE -
			       (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}

	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;

pcidev_init_reg_err:
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}

pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);

pcidev_init_alloc_err:
	iounmap(hw->io);

pcidev_init_io_err:
	kfree(info);

pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);

	return result;
}
/*
 * pcidev_exit - PCI remove routine
 * @pdev: PCI device.
 *
 * Unwinds everything done by pcidev_init(): unregisters and frees the
 * network devices, unmaps the register window, and releases memory.
 */
static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;

	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	kfree(hw_priv->hw.ksz_switch);

	/* Drop the reference taken in pcidev_init(). */
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}
  6093. static int __maybe_unused pcidev_resume(struct device *dev_d)
  6094. {
  6095. int i;
  6096. struct platform_info *info = dev_get_drvdata(dev_d);
  6097. struct dev_info *hw_priv = &info->dev_info;
  6098. struct ksz_hw *hw = &hw_priv->hw;
  6099. device_wakeup_disable(dev_d);
  6100. if (hw_priv->wol_enable)
  6101. hw_cfg_wol_pme(hw, 0);
  6102. for (i = 0; i < hw->dev_count; i++) {
  6103. if (info->netdev[i]) {
  6104. struct net_device *dev = info->netdev[i];
  6105. if (netif_running(dev)) {
  6106. netdev_open(dev);
  6107. netif_device_attach(dev);
  6108. }
  6109. }
  6110. }
  6111. return 0;
  6112. }
  6113. static int __maybe_unused pcidev_suspend(struct device *dev_d)
  6114. {
  6115. int i;
  6116. struct platform_info *info = dev_get_drvdata(dev_d);
  6117. struct dev_info *hw_priv = &info->dev_info;
  6118. struct ksz_hw *hw = &hw_priv->hw;
  6119. /* Need to find a way to retrieve the device IP address. */
  6120. static const u8 net_addr[] = { 192, 168, 1, 1 };
  6121. for (i = 0; i < hw->dev_count; i++) {
  6122. if (info->netdev[i]) {
  6123. struct net_device *dev = info->netdev[i];
  6124. if (netif_running(dev)) {
  6125. netif_device_detach(dev);
  6126. netdev_close(dev);
  6127. }
  6128. }
  6129. }
  6130. if (hw_priv->wol_enable) {
  6131. hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
  6132. hw_cfg_wol_pme(hw, 1);
  6133. }
  6134. device_wakeup_enable(dev_d);
  6135. return 0;
  6136. }
static char pcidev_name[] = "ksz884xp";

/* PCI ids of the supported chips: KSZ8841 and KSZ8842. */
static const struct pci_device_id pcidev_table[] = {
	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pcidev_table);

static SIMPLE_DEV_PM_OPS(pcidev_pm_ops, pcidev_suspend, pcidev_resume);

static struct pci_driver pci_device_driver = {
	.driver.pm	= &pcidev_pm_ops,
	.name		= pcidev_name,
	.id_table	= pcidev_table,
	.probe		= pcidev_init,
	.remove		= pcidev_exit
};

module_pci_driver(pci_device_driver);

MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <[email protected]>");
MODULE_LICENSE("GPL");

/* Module parameters; see the variable definitions above for details. */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");

module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");