qla_init.c 264 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
7977897799780978197829783978497859786978797889789979097919792979397949795979697979798979998009801980298039804980598069807980898099810981198129813981498159816981798189819982098219822982398249825982698279828982998309831983298339834983598369837983898399840984198429843984498459846984798489849985098519852985398549855985698579858985998609861986298639864986598669867986898699870987198729873987498759876987798789879988098819882988398849885988698879888988998909891989298939894989598969897989898999900990199029903990499059906990799089909991099119912991399149915991699179918991999209921992299239924992599269927992899299930993199329933993499359936993799389939994099419942994399449945994699479948994999509951995299539954995599569957995899599960996199629963996499659966996799689969997099719972997399749975997699779978997999809981998299839984998599869987998899899990999199929993999499959996999799989999100001000110002100031000410005100061000710008100091001010011100121001310014100151001610017100181001910020100211002210023100241002510026100271002810029100301003110032100331003410035100361003710038100391004010041100421004310044100451004610047100481004910050100511005210053100541005510056
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QLogic Fibre Channel HBA Driver
  4. * Copyright (c) 2003-2014 QLogic Corporation
  5. */
  6. #include "qla_def.h"
  7. #include "qla_gbl.h"
  8. #include <linux/delay.h>
  9. #include <linux/slab.h>
  10. #include <linux/vmalloc.h>
  11. #include "qla_devtbl.h"
  12. #ifdef CONFIG_SPARC
  13. #include <asm/prom.h>
  14. #endif
  15. #include "qla_target.h"
  16. /*
  17. * QLogic ISP2x00 Hardware Support Function Prototypes.
  18. */
  19. static int qla2x00_isp_firmware(scsi_qla_host_t *);
  20. static int qla2x00_setup_chip(scsi_qla_host_t *);
  21. static int qla2x00_fw_ready(scsi_qla_host_t *);
  22. static int qla2x00_configure_hba(scsi_qla_host_t *);
  23. static int qla2x00_configure_loop(scsi_qla_host_t *);
  24. static int qla2x00_configure_local_loop(scsi_qla_host_t *);
  25. static int qla2x00_configure_fabric(scsi_qla_host_t *);
  26. static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
  27. static int qla2x00_restart_isp(scsi_qla_host_t *);
  28. static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
  29. static int qla84xx_init_chip(scsi_qla_host_t *);
  30. static int qla25xx_init_queues(struct qla_hw_data *);
  31. static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
  32. struct event_arg *ea);
  33. static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
  34. struct event_arg *);
  35. static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
  36. /* SRB Extensions ---------------------------------------------------------- */
/*
 * qla2x00_sp_timeout - per-SRB timer expiry handler.
 *
 * Runs the SRB's type-specific ->timeout() callback, drops the timer's
 * reference on the SRB, then checks whether the adapter has dropped off
 * the bus and, if so, marks the port EEH-busy.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	scsi_qla_host_t *vha = sp->vha;

	/* Timer callbacks must not run with interrupts disabled. */
	WARN_ON(irqs_disabled());
	iocb = &sp->u.iocb_cmd;
	iocb->timeout(sp);

	/* ref: TMR */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	/* vha was cached above; sp may be gone after the kref_put. */
	if (vha && qla2x00_isp_reg_stat(vha->hw)) {
		ql_log(ql_log_info, vha, 0x9008,
		    "PCI/Register disconnect.\n");
		qla_pci_set_eeh_busy(vha);
	}
}
  54. void qla2x00_sp_free(srb_t *sp)
  55. {
  56. struct srb_iocb *iocb = &sp->u.iocb_cmd;
  57. del_timer(&iocb->timer);
  58. qla2x00_rel_sp(sp);
  59. }
/* Poisoned ->done() installed on freed SRBs to catch use-after-free callers. */
void qla2xxx_rel_done_warning(srb_t *sp, int res)
{
	WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
}
/* Poisoned ->free() installed on freed SRBs to catch double-free callers. */
void qla2xxx_rel_free_warning(srb_t *sp)
{
	WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
}
  68. /* Asynchronous Login/Logout Routines -------------------------------------- */
  69. unsigned long
  70. qla2x00_get_async_timeout(struct scsi_qla_host *vha)
  71. {
  72. unsigned long tmo;
  73. struct qla_hw_data *ha = vha->hw;
  74. /* Firmware should use switch negotiated r_a_tov for timeout. */
  75. tmo = ha->r_a_tov / 10 * 2;
  76. if (IS_QLAFX00(ha)) {
  77. tmo = FX00_DEF_RATOV * 2;
  78. } else if (!IS_FWI2_CAPABLE(ha)) {
  79. /*
  80. * Except for earlier ISPs where the timeout is seeded from the
  81. * initialization control block.
  82. */
  83. tmo = ha->login_timeout;
  84. }
  85. return tmo;
  86. }
/*
 * qla24xx_abort_iocb_timeout - timer expiry for an ABTS (abort) IOCB.
 *
 * Walks the queue pair's outstanding-command table under the qp lock,
 * unhooking both the original command (if still outstanding) and the
 * abort SRB itself, then completes whichever were found with
 * QLA_OS_TIMER_EXPIRED outside the lock.
 */
static void qla24xx_abort_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	struct qla_qpair *qpair = sp->qpair;
	u32 handle;
	unsigned long flags;
	int sp_found = 0, cmdsp_found = 0;

	if (sp->cmd_sp)
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
		    sp->cmd_sp->handle, sp->cmd_sp->type,
		    sp->handle, sp->type);
	else
		ql_dbg(ql_dbg_async, sp->vha, 0x507c,
		    "Abort timeout 2 - hdl=%x, type=%x\n",
		    sp->handle, sp->type);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Handle 0 is not used; scan every live handle on this queue pair. */
	for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
		if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
		    sp->cmd_sp)) {
			qpair->req->outstanding_cmds[handle] = NULL;
			cmdsp_found = 1;
			qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
		}

		/* removing the abort */
		if (qpair->req->outstanding_cmds[handle] == sp) {
			qpair->req->outstanding_cmds[handle] = NULL;
			sp_found = 1;
			qla_put_fw_resources(qpair, &sp->iores);
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (cmdsp_found && sp->cmd_sp) {
		/*
		 * This done function should take care of
		 * original command ref: INIT
		 */
		sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
	}

	if (sp_found) {
		/* Report the abort itself as timed out to its completer. */
		abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
		sp->done(sp, QLA_OS_TIMER_EXPIRED);
	}
}
/*
 * Completion callback for an ABTS SRB.
 *
 * Waits for any NVMe reference on the original command to drain, then
 * either wakes a synchronous waiter (SRB_WAKEUP_ON_COMP, see
 * qla24xx_async_abort_cmd) or drops the SRB's INIT reference itself.
 */
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *abt = &sp->u.iocb_cmd;
	srb_t *orig_sp = sp->cmd_sp;

	if (orig_sp)
		qla_wait_nvme_release_cmd_kref(orig_sp);

	if (sp->flags & SRB_WAKEUP_ON_COMP)
		complete(&abt->u.abt.comp);
	else
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
/*
 * qla24xx_async_abort_cmd - issue an ABTS IOCB to abort @cmd_sp.
 * @cmd_sp: outstanding command to be aborted.
 * @wait: when true, block until the abort completes and derive the
 *        return value from its completion status.
 *
 * Allocates an abort SRB on @cmd_sp's queue pair (ref: INIT) and starts
 * it.  In the synchronous case the INIT reference is dropped here after
 * the completion fires; otherwise qla24xx_abort_sp_done drops it.
 *
 * Returns QLA_SUCCESS, QLA_ERR_FROM_FW, QLA_MEMORY_ALLOC_FAILED or a
 * qla2x00_start_sp() failure code.
 */
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* ref: INIT for ABTS command */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
				  GFP_ATOMIC);
	if (!sp)
		return QLA_MEMORY_ALLOC_FAILED;

	qla_vha_mark_busy(vha);
	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	sp->cmd_sp = cmd_sp;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
	sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	    cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
		return rval;
	}

	if (wait) {
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_ERR_FROM_FW;
		/* ref: INIT */
		kref_put(&sp->cmd_kref, qla2x00_sp_release);
	}

	return rval;
}
/*
 * qla2x00_async_iocb_timeout - generic timeout handler for async IOCBs.
 *
 * Tries to abort the timed-out SRB via an ABTS.  If the abort could not
 * be issued, the SRB is removed from its queue pair's outstanding table
 * by hand and completed with QLA_FUNCTION_TIMEOUT.  For login SRBs the
 * logio data is first set up so that the completion path can decide
 * whether to retry.
 */
void
qla2x00_async_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	if (fcport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	} else {
		pr_info("Async-%s timeout - hdl=%x.\n",
		    sp->name, sp->handle);
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Retry as needed. */
			lio->u.logio.data[0] = MBS_COMMAND_ERROR;
			lio->u.logio.data[1] =
				lio->u.logio.flags & SRB_LOGIN_RETRIED ?
				QLA_LOGIO_LOGIN_RETRIED : 0;
			/* Abort failed: unhook the SRB ourselves. */
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
						NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	case SRB_LOGOUT_CMD:
	case SRB_CT_PTHRU_CMD:
	case SRB_MB_IOCB:
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
	case SRB_CTRL_VP:
	default:
		rc = qla24xx_async_abort_cmd(sp, false);
		if (rc) {
			/* Abort failed: unhook the SRB ourselves. */
			spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
			for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
			    h++) {
				if (sp->qpair->req->outstanding_cmds[h] ==
				    sp) {
					sp->qpair->req->outstanding_cmds[h] =
						NULL;
					break;
				}
			}
			spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
			sp->done(sp, QLA_FUNCTION_TIMEOUT);
		}
		break;
	}
}
/*
 * Completion callback for an async PLOGI SRB.
 *
 * Copies the logio status into an event_arg and feeds it to the
 * PLOGI-done state machine (skipped while the driver is unloading),
 * then drops the SRB's INIT reference.
 */
static void qla2x00_async_login_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		memset(&ea, 0, sizeof(ea));
		ea.fcport = sp->fcport;
		ea.data[0] = lio->u.logio.data[0];
		ea.data[1] = lio->u.logio.data[1];
		ea.iop[0] = lio->u.logio.iop[0];
		ea.iop[1] = lio->u.logio.iop[1];
		ea.sp = sp;
		/* A transport-level failure overrides the mailbox status. */
		if (res)
			ea.data[0] = MBS_COMMAND_ERROR;
		qla24xx_handle_plogi_done_event(vha, &ea);
	}

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
/*
 * qla2x00_async_login - issue an asynchronous PLOGI to @fcport.
 * @vha: adapter state pointer.
 * @fcport: remote port to log in to.
 * @data: logio data (unused on the send path here).
 *
 * Refuses to send when offline, when a command is already pending
 * (FCF_ASYNC_SENT), or when no loop ID has been assigned.  On queueing
 * failure, RELOGIN_NEEDED is set so the DPC thread retries later.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generations so completion can detect stale events. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_login_sp_done);

	lio = &sp->u.iocb_cmd;
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
		/* N2N with the bigger-WWN peer: send PRLI only. */
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	} else {
		if (vha->hw->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			/* Secure (FC-SP) login; PRLI handled separately. */
			lio->u.logio.flags |=
				(SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
		} else {
			lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
		}
	}

	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	rval = qla2x00_start_sp(sp);

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b24, fcport->login_retry,
	    lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");

	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
/*
 * Completion callback for an async LOGO SRB: bump the login generation,
 * notify the target-mode logout handler, and drop the INIT reference.
 */
static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	sp->fcport->login_gen++;
	qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
  346. int
  347. qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
  348. {
  349. srb_t *sp;
  350. int rval = QLA_FUNCTION_FAILED;
  351. fcport->flags |= FCF_ASYNC_SENT;
  352. /* ref: INIT */
  353. sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
  354. if (!sp)
  355. goto done;
  356. sp->type = SRB_LOGOUT_CMD;
  357. sp->name = "logout";
  358. qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
  359. qla2x00_async_logout_sp_done),
  360. ql_dbg(ql_dbg_disc, vha, 0x2070,
  361. "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
  362. sp->handle, fcport->loop_id, fcport->d_id.b.domain,
  363. fcport->d_id.b.area, fcport->d_id.b.al_pa,
  364. fcport->port_name, fcport->explicit_logout);
  365. rval = qla2x00_start_sp(sp);
  366. if (rval != QLA_SUCCESS)
  367. goto done_free_sp;
  368. return rval;
  369. done_free_sp:
  370. /* ref: INIT */
  371. kref_put(&sp->cmd_kref, qla2x00_sp_release);
  372. done:
  373. fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
  374. return rval;
  375. }
  376. void
  377. qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
  378. uint16_t *data)
  379. {
  380. fcport->flags &= ~FCF_ASYNC_ACTIVE;
  381. /* Don't re-login in target mode */
  382. if (!fcport->tgt_session)
  383. qla2x00_mark_device_lost(vha, fcport, 1);
  384. qlt_logo_completion_handler(fcport, data[0]);
  385. }
/*
 * Completion callback for an async PRLO SRB: defer the rest of the
 * processing to the work queue (unless unloading) and drop the INIT
 * reference.
 */
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    lio->u.logio.data);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
/*
 * qla2x00_async_prlo - issue an asynchronous PRLO (process logout)
 * to @fcport.
 *
 * Allocates an SRB (ref: INIT) and starts the PRLO IOCB; completion is
 * handled by qla2x00_async_prlo_sp_done().  On failure the SRB
 * reference is dropped and FCF_ASYNC_ACTIVE is cleared.
 */
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval;

	rval = QLA_FUNCTION_FAILED;
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_PRLO_CMD;
	sp->name = "prlo";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_prlo_sp_done);

	ql_dbg(ql_dbg_disc, vha, 0x2070,
	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
/*
 * qla24xx_handle_adisc_event - process the result of an async ADISC.
 *
 * On ADISC failure the session is scheduled for deletion (with firmware
 * cleanup forced via logout_on_delete).  Stale completions are filtered
 * out by comparing the login/RSCN generation counters snapshotted on
 * the SRB against the fcport's current values; only a fresh, successful
 * ADISC proceeds to __qla24xx_handle_gpdb_event().
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);

		spin_lock_irqsave(&vha->work_lock, flags);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		if (fcport->deleted == QLA_SESS_DELETED)
			fcport->deleted = 0;

		fcport->logout_on_delete = 1;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		/* An RSCN arrived in the meantime: replay it and re-delete. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}
  466. static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  467. {
  468. struct qla_work_evt *e;
  469. e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
  470. if (!e)
  471. return QLA_FUNCTION_FAILED;
  472. e->u.fcport.fcport = fcport;
  473. fcport->flags |= FCF_ASYNC_ACTIVE;
  474. qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
  475. return qla2x00_post_work(vha, e);
  476. }
/*
 * Completion callback for an async ADISC SRB.
 *
 * Packs the logio status into an event_arg, forwards it to
 * qla24xx_handle_adisc_event(), and drops the SRB's INIT reference.
 */
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.rc = res;
	ea.data[0] = lio->u.logio.data[0];
	ea.data[1] = lio->u.logio.data[1];
	ea.iop[0] = lio->u.logio.iop[0];
	ea.iop[1] = lio->u.logio.iop[1];
	ea.fcport = sp->fcport;
	ea.sp = sp;
	/* A transport-level failure overrides the mailbox status. */
	if (res)
		ea.data[0] = MBS_COMMAND_ERROR;

	qla24xx_handle_adisc_event(vha, &ea);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
/*
 * qla2x00_async_adisc - issue an asynchronous ADISC to @fcport.
 * @vha: adapter state pointer.
 * @fcport: remote port to verify.
 * @data: logio data; data[1] carries QLA_LOGIO_LOGIN_RETRIED.
 *
 * Refuses to send for sessions being deleted, when offline, or when a
 * command is already pending.  On SRB allocation or start failure the
 * request is re-posted as a work item so it can be retried.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (IS_SESSION_DELETED(fcport)) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC is being delete - not sending command.\n",
		    __func__, fcport->port_name);
		fcport->flags &= ~FCF_ASYNC_ACTIVE;
		return rval;
	}

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";
	/* Snapshot generations so completion can detect stale events. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_adisc_sp_done);

	if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
		lio = &sp->u.iocb_cmd;
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
	}

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Requeue the ADISC on the work queue for a later retry. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}
  546. static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
  547. {
  548. struct qla_hw_data *ha = vha->hw;
  549. if (IS_FWI2_CAPABLE(ha))
  550. return loop_id > NPH_LAST_HANDLE;
  551. return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
  552. loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
  553. }
  554. /**
  555. * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
  556. * @vha: adapter state pointer.
  557. * @dev: port structure pointer.
  558. *
  559. * Returns:
  560. * qla2x00 local function return status code.
  561. *
  562. * Context:
  563. * Kernel context.
  564. */
  565. static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
  566. {
  567. int rval;
  568. struct qla_hw_data *ha = vha->hw;
  569. unsigned long flags = 0;
  570. rval = QLA_SUCCESS;
  571. spin_lock_irqsave(&ha->vport_slock, flags);
  572. dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
  573. if (dev->loop_id >= LOOPID_MAP_SIZE ||
  574. qla2x00_is_reserved_id(vha, dev->loop_id)) {
  575. dev->loop_id = FC_NO_LOOP_ID;
  576. rval = QLA_FUNCTION_FAILED;
  577. } else {
  578. set_bit(dev->loop_id, ha->loop_id_map);
  579. }
  580. spin_unlock_irqrestore(&ha->vport_slock, flags);
  581. if (rval == QLA_SUCCESS)
  582. ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
  583. "Assigning new loopid=%x, portid=%x.\n",
  584. dev->loop_id, dev->d_id.b24);
  585. else
  586. ql_log(ql_log_warn, dev->vha, 0x2087,
  587. "No loop_id's available, portid=%x.\n",
  588. dev->d_id.b24);
  589. return rval;
  590. }
  591. void qla2x00_clear_loop_id(fc_port_t *fcport)
  592. {
  593. struct qla_hw_data *ha = fcport->vha->hw;
  594. if (fcport->loop_id == FC_NO_LOOP_ID ||
  595. qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
  596. return;
  597. clear_bit(fcport->loop_id, ha->loop_id_map);
  598. fcport->loop_id = FC_NO_LOOP_ID;
  599. }
  600. static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
  601. struct event_arg *ea)
  602. {
  603. fc_port_t *fcport, *conflict_fcport;
  604. struct get_name_list_extended *e;
  605. u16 i, n, found = 0, loop_id;
  606. port_id_t id;
  607. u64 wwn;
  608. u16 data[2];
  609. u8 current_login_state, nvme_cls;
  610. fcport = ea->fcport;
  611. ql_dbg(ql_dbg_disc, vha, 0xffff,
  612. "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
  613. __func__, fcport->port_name, fcport->disc_state,
  614. fcport->fw_login_state, ea->rc,
  615. fcport->login_gen, fcport->last_login_gen,
  616. fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
  617. if (fcport->disc_state == DSC_DELETE_PEND)
  618. return;
  619. if (ea->rc) { /* rval */
  620. if (fcport->login_retry == 0) {
  621. ql_dbg(ql_dbg_disc, vha, 0x20de,
  622. "GNL failed Port login retry %8phN, retry cnt=%d.\n",
  623. fcport->port_name, fcport->login_retry);
  624. }
  625. return;
  626. }
  627. if (fcport->last_rscn_gen != fcport->rscn_gen) {
  628. qla_rscn_replay(fcport);
  629. qlt_schedule_sess_for_deletion(fcport);
  630. return;
  631. } else if (fcport->last_login_gen != fcport->login_gen) {
  632. ql_dbg(ql_dbg_disc, vha, 0x20e0,
  633. "%s %8phC login gen changed\n",
  634. __func__, fcport->port_name);
  635. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  636. return;
  637. }
  638. n = ea->data[0] / sizeof(struct get_name_list_extended);
  639. ql_dbg(ql_dbg_disc, vha, 0x20e1,
  640. "%s %d %8phC n %d %02x%02x%02x lid %d \n",
  641. __func__, __LINE__, fcport->port_name, n,
  642. fcport->d_id.b.domain, fcport->d_id.b.area,
  643. fcport->d_id.b.al_pa, fcport->loop_id);
  644. for (i = 0; i < n; i++) {
  645. e = &vha->gnl.l[i];
  646. wwn = wwn_to_u64(e->port_name);
  647. id.b.domain = e->port_id[2];
  648. id.b.area = e->port_id[1];
  649. id.b.al_pa = e->port_id[0];
  650. id.b.rsvd_1 = 0;
  651. if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
  652. continue;
  653. if (IS_SW_RESV_ADDR(id))
  654. continue;
  655. found = 1;
  656. loop_id = le16_to_cpu(e->nport_handle);
  657. loop_id = (loop_id & 0x7fff);
  658. nvme_cls = e->current_login_state >> 4;
  659. current_login_state = e->current_login_state & 0xf;
  660. if (PRLI_PHASE(nvme_cls)) {
  661. current_login_state = nvme_cls;
  662. fcport->fc4_type &= ~FS_FC4TYPE_FCP;
  663. fcport->fc4_type |= FS_FC4TYPE_NVME;
  664. } else if (PRLI_PHASE(current_login_state)) {
  665. fcport->fc4_type |= FS_FC4TYPE_FCP;
  666. fcport->fc4_type &= ~FS_FC4TYPE_NVME;
  667. }
  668. ql_dbg(ql_dbg_disc, vha, 0x20e2,
  669. "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
  670. __func__, fcport->port_name,
  671. e->current_login_state, fcport->fw_login_state,
  672. fcport->fc4_type, id.b24, fcport->d_id.b24,
  673. loop_id, fcport->loop_id);
  674. switch (fcport->disc_state) {
  675. case DSC_DELETE_PEND:
  676. case DSC_DELETED:
  677. break;
  678. default:
  679. if ((id.b24 != fcport->d_id.b24 &&
  680. fcport->d_id.b24 &&
  681. fcport->loop_id != FC_NO_LOOP_ID) ||
  682. (fcport->loop_id != FC_NO_LOOP_ID &&
  683. fcport->loop_id != loop_id)) {
  684. ql_dbg(ql_dbg_disc, vha, 0x20e3,
  685. "%s %d %8phC post del sess\n",
  686. __func__, __LINE__, fcport->port_name);
  687. if (fcport->n2n_flag)
  688. fcport->d_id.b24 = 0;
  689. qlt_schedule_sess_for_deletion(fcport);
  690. return;
  691. }
  692. break;
  693. }
  694. fcport->loop_id = loop_id;
  695. if (fcport->n2n_flag)
  696. fcport->d_id.b24 = id.b24;
  697. wwn = wwn_to_u64(fcport->port_name);
  698. qlt_find_sess_invalidate_other(vha, wwn,
  699. id, loop_id, &conflict_fcport);
  700. if (conflict_fcport) {
  701. /*
  702. * Another share fcport share the same loop_id &
  703. * nport id. Conflict fcport needs to finish
  704. * cleanup before this fcport can proceed to login.
  705. */
  706. conflict_fcport->conflict = fcport;
  707. fcport->login_pause = 1;
  708. }
  709. switch (vha->hw->current_topology) {
  710. default:
  711. switch (current_login_state) {
  712. case DSC_LS_PRLI_COMP:
  713. ql_dbg(ql_dbg_disc,
  714. vha, 0x20e4, "%s %d %8phC post gpdb\n",
  715. __func__, __LINE__, fcport->port_name);
  716. if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
  717. fcport->port_type = FCT_INITIATOR;
  718. else
  719. fcport->port_type = FCT_TARGET;
  720. data[0] = data[1] = 0;
  721. qla2x00_post_async_adisc_work(vha, fcport,
  722. data);
  723. break;
  724. case DSC_LS_PLOGI_COMP:
  725. if (vha->hw->flags.edif_enabled) {
  726. /* check to see if App support Secure */
  727. qla24xx_post_gpdb_work(vha, fcport, 0);
  728. break;
  729. }
  730. fallthrough;
  731. case DSC_LS_PORT_UNAVAIL:
  732. default:
  733. if (fcport->loop_id == FC_NO_LOOP_ID) {
  734. qla2x00_find_new_loop_id(vha, fcport);
  735. fcport->fw_login_state =
  736. DSC_LS_PORT_UNAVAIL;
  737. }
  738. ql_dbg(ql_dbg_disc, vha, 0x20e5,
  739. "%s %d %8phC\n", __func__, __LINE__,
  740. fcport->port_name);
  741. qla24xx_fcport_handle_login(vha, fcport);
  742. break;
  743. }
  744. break;
  745. case ISP_CFG_N:
  746. fcport->fw_login_state = current_login_state;
  747. fcport->d_id = id;
  748. switch (current_login_state) {
  749. case DSC_LS_PRLI_PEND:
  750. /*
  751. * In the middle of PRLI. Let it finish.
  752. * Allow relogin code to recheck state again
  753. * with GNL. Push disc_state back to DELETED
  754. * so GNL can go out again
  755. */
  756. qla2x00_set_fcport_disc_state(fcport,
  757. DSC_DELETED);
  758. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  759. break;
  760. case DSC_LS_PRLI_COMP:
  761. if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
  762. fcport->port_type = FCT_INITIATOR;
  763. else
  764. fcport->port_type = FCT_TARGET;
  765. data[0] = data[1] = 0;
  766. qla2x00_post_async_adisc_work(vha, fcport,
  767. data);
  768. break;
  769. case DSC_LS_PLOGI_COMP:
  770. if (vha->hw->flags.edif_enabled &&
  771. DBELL_ACTIVE(vha)) {
  772. /* check to see if App support secure or not */
  773. qla24xx_post_gpdb_work(vha, fcport, 0);
  774. break;
  775. }
  776. if (fcport_is_bigger(fcport)) {
  777. /* local adapter is smaller */
  778. if (fcport->loop_id != FC_NO_LOOP_ID)
  779. qla2x00_clear_loop_id(fcport);
  780. fcport->loop_id = loop_id;
  781. qla24xx_fcport_handle_login(vha,
  782. fcport);
  783. break;
  784. }
  785. fallthrough;
  786. default:
  787. if (fcport_is_smaller(fcport)) {
  788. /* local adapter is bigger */
  789. if (fcport->loop_id != FC_NO_LOOP_ID)
  790. qla2x00_clear_loop_id(fcport);
  791. fcport->loop_id = loop_id;
  792. qla24xx_fcport_handle_login(vha,
  793. fcport);
  794. }
  795. break;
  796. }
  797. break;
  798. } /* switch (ha->current_topology) */
  799. }
  800. if (!found) {
  801. switch (vha->hw->current_topology) {
  802. case ISP_CFG_F:
  803. case ISP_CFG_FL:
  804. for (i = 0; i < n; i++) {
  805. e = &vha->gnl.l[i];
  806. id.b.domain = e->port_id[0];
  807. id.b.area = e->port_id[1];
  808. id.b.al_pa = e->port_id[2];
  809. id.b.rsvd_1 = 0;
  810. loop_id = le16_to_cpu(e->nport_handle);
  811. if (fcport->d_id.b24 == id.b24) {
  812. conflict_fcport =
  813. qla2x00_find_fcport_by_wwpn(vha,
  814. e->port_name, 0);
  815. if (conflict_fcport) {
  816. ql_dbg(ql_dbg_disc + ql_dbg_verbose,
  817. vha, 0x20e5,
  818. "%s %d %8phC post del sess\n",
  819. __func__, __LINE__,
  820. conflict_fcport->port_name);
  821. qlt_schedule_sess_for_deletion
  822. (conflict_fcport);
  823. }
  824. }
  825. /*
  826. * FW already picked this loop id for
  827. * another fcport
  828. */
  829. if (fcport->loop_id == loop_id)
  830. fcport->loop_id = FC_NO_LOOP_ID;
  831. }
  832. qla24xx_fcport_handle_login(vha, fcport);
  833. break;
  834. case ISP_CFG_N:
  835. qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
  836. if (time_after_eq(jiffies, fcport->dm_login_expire)) {
  837. if (fcport->n2n_link_reset_cnt < 2) {
  838. fcport->n2n_link_reset_cnt++;
  839. /*
  840. * remote port is not sending PLOGI.
  841. * Reset link to kick start his state
  842. * machine
  843. */
  844. set_bit(N2N_LINK_RESET,
  845. &vha->dpc_flags);
  846. } else {
  847. if (fcport->n2n_chip_reset < 1) {
  848. ql_log(ql_log_info, vha, 0x705d,
  849. "Chip reset to bring laser down");
  850. set_bit(ISP_ABORT_NEEDED,
  851. &vha->dpc_flags);
  852. fcport->n2n_chip_reset++;
  853. } else {
  854. ql_log(ql_log_info, vha, 0x705d,
  855. "Remote port %8ph is not coming back\n",
  856. fcport->port_name);
  857. fcport->scan_state = 0;
  858. }
  859. }
  860. qla2xxx_wake_dpc(vha);
  861. } else {
  862. /*
  863. * report port suppose to do PLOGI. Give him
  864. * more time. FW will catch it.
  865. */
  866. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  867. }
  868. break;
  869. case ISP_CFG_NL:
  870. qla24xx_fcport_handle_login(vha, fcport);
  871. break;
  872. default:
  873. break;
  874. }
  875. }
  876. } /* gnl_event */
/*
 * qla24xx_async_gnl_sp_done - completion handler for the async Get Name
 * List (GNL) mailbox command.
 * @sp:  completed srb; sp->u.iocb_cmd.u.mbx.in_mb holds the mailbox
 *       registers returned by firmware (mb[1] = bytes transferred).
 * @res: driver-level completion status.
 *
 * Firmware has filled vha->gnl.l with one get_name_list_extended entry
 * per port it knows about.  Mark the reported loop IDs as in use,
 * dispatch a gnl-done event for every fcport queued on vha->gnl.fcports,
 * post new-session work for WWPNs firmware knows but the driver does
 * not, and finally re-trigger GNL for any waiter that queued while this
 * command was in flight.
 */
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);

	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	/* mb[1] is the byte count transferred; derive the entry count. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	/* Reserve every loop ID firmware reports so it is not re-assigned. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, &wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	/*
	 * Splice the waiter list onto a private head under the session
	 * lock, then walk it without the lock held so the gnl-done event
	 * handler is free to take locks of its own.
	 */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		/* Detach each waiter and clear its async flags under lock. */
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		list_del_init(&fcport->gnl_entry);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		/* Skip switch-reserved (well-known fabric service) addresses. */
		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);

			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, 0);
		}
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	if (!list_empty(&vha->gnl.fcports)) {
		/* retrigger gnl for ports queued while this one was in flight */
		list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
		    gnl_entry) {
			list_del_init(&fcport->gnl_entry);
			fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
			if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
				break;
		}
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
/*
 * qla24xx_async_gnl - issue an async Get Name List (GNL) mailbox command.
 * @vha:    virtual host the command is issued on.
 * @fcport: port whose login state is being queried.
 *
 * Queues @fcport on vha->gnl.fcports.  If a GNL is already in flight
 * (vha->gnl.sent), the fcport simply piggy-backs on that command and
 * will be serviced by its completion handler; otherwise a new
 * MBC_PORT_NODE_NAME_LIST mailbox IOCB is built and started.
 *
 * Returns QLA_SUCCESS on send (or piggy-back), QLA_FUNCTION_FAILED
 * otherwise.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		goto done;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
	/* Snapshot generations so completion can detect a concurrent change. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already outstanding; ride along on it. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* ref: INIT */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla24xx_async_gnl_sp_done);

	/* Mailbox registers: DMA address/size of the name-list buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~(FCF_ASYNC_SENT);
done:
	fcport->flags &= ~(FCF_ASYNC_ACTIVE);
	return rval;
}
  1028. int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  1029. {
  1030. struct qla_work_evt *e;
  1031. e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
  1032. if (!e)
  1033. return QLA_FUNCTION_FAILED;
  1034. e->u.fcport.fcport = fcport;
  1035. fcport->flags |= FCF_ASYNC_ACTIVE;
  1036. return qla2x00_post_work(vha, e);
  1037. }
  1038. static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
  1039. {
  1040. struct scsi_qla_host *vha = sp->vha;
  1041. struct qla_hw_data *ha = vha->hw;
  1042. fc_port_t *fcport = sp->fcport;
  1043. u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
  1044. struct event_arg ea;
  1045. ql_dbg(ql_dbg_disc, vha, 0x20db,
  1046. "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
  1047. sp->name, res, fcport->port_name, mb[1], mb[2]);
  1048. fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
  1049. if (res == QLA_FUNCTION_TIMEOUT)
  1050. goto done;
  1051. memset(&ea, 0, sizeof(ea));
  1052. ea.fcport = fcport;
  1053. ea.sp = sp;
  1054. qla24xx_handle_gpdb_event(vha, &ea);
  1055. done:
  1056. dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
  1057. sp->u.iocb_cmd.u.mbx.in_dma);
  1058. kref_put(&sp->cmd_kref, qla2x00_sp_release);
  1059. }
  1060. int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  1061. {
  1062. struct qla_work_evt *e;
  1063. if (vha->host->active_mode == MODE_TARGET)
  1064. return QLA_FUNCTION_FAILED;
  1065. e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
  1066. if (!e)
  1067. return QLA_FUNCTION_FAILED;
  1068. e->u.fcport.fcport = fcport;
  1069. return qla2x00_post_work(vha, e);
  1070. }
  1071. static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
  1072. {
  1073. struct scsi_qla_host *vha = sp->vha;
  1074. struct srb_iocb *lio = &sp->u.iocb_cmd;
  1075. struct event_arg ea;
  1076. ql_dbg(ql_dbg_disc, vha, 0x2129,
  1077. "%s %8phC res %x\n", __func__,
  1078. sp->fcport->port_name, res);
  1079. sp->fcport->flags &= ~FCF_ASYNC_SENT;
  1080. if (!test_bit(UNLOADING, &vha->dpc_flags)) {
  1081. memset(&ea, 0, sizeof(ea));
  1082. ea.fcport = sp->fcport;
  1083. ea.data[0] = lio->u.logio.data[0];
  1084. ea.data[1] = lio->u.logio.data[1];
  1085. ea.iop[0] = lio->u.logio.iop[0];
  1086. ea.iop[1] = lio->u.logio.iop[1];
  1087. ea.sp = sp;
  1088. if (res == QLA_OS_TIMER_EXPIRED)
  1089. ea.data[0] = QLA_OS_TIMER_EXPIRED;
  1090. else if (res)
  1091. ea.data[0] = MBS_COMMAND_ERROR;
  1092. qla24xx_handle_prli_done_event(vha, &ea);
  1093. }
  1094. kref_put(&sp->cmd_kref, qla2x00_sp_release);
  1095. }
/*
 * qla24xx_async_prli - issue an async PRLI (Process Login) IOCB.
 * @vha:    virtual host the command is issued on.
 * @fcport: port to perform PRLI with.
 *
 * Skipped when the host is offline, or in dual mode while firmware is
 * still mid PLOGI/PRLI for this port.  Requests an NVMe PRLI when the
 * port is an NVMe target.  On start failure, flags the port for relogin
 * before releasing the srb.
 *
 * Returns QLA_SUCCESS when the IOCB was started, QLA_FUNCTION_FAILED
 * otherwise.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	/* Let an in-progress firmware login settle first (dual mode only). */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
	    qla_dual_mode_enabled(vha)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
		    __func__, __LINE__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_prli_sp_done);

	lio = &sp->u.iocb_cmd;
	lio->u.logio.flags = 0;

	/* Request an NVMe PRLI for NVMe-capable targets. */
	if (NVME_TARGET(vha->hw, fcport))
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
	    NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not queue the IOCB; ask DPC to retry the login. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
  1145. int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
  1146. {
  1147. struct qla_work_evt *e;
  1148. e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
  1149. if (!e)
  1150. return QLA_FUNCTION_FAILED;
  1151. e->u.fcport.fcport = fcport;
  1152. e->u.fcport.opt = opt;
  1153. fcport->flags |= FCF_ASYNC_ACTIVE;
  1154. return qla2x00_post_work(vha, e);
  1155. }
/*
 * qla24xx_async_gpdb - issue an async Get Port Database mailbox command.
 * @vha:    virtual host the command is issued on.
 * @fcport: port whose firmware database entry is requested.
 * @opt:    option bits placed in mailbox register 10.
 *
 * Allocates a port_database_24xx DMA buffer, builds an
 * MBC_GET_PORT_DATABASE mailbox IOCB pointing at it, and starts it.
 * The completion handler (qla24xx_async_gpdb_sp_done) frees the buffer.
 * On any failure the error path re-queues the request through the work
 * queue so the GPDB is eventually retried.
 *
 * Returns QLA_SUCCESS when the IOCB was started, QLA_FUNCTION_FAILED
 * otherwise.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	/* A session mid-teardown must not be queried. */
	if (IS_SESSION_DELETED(fcport)) {
		ql_log(ql_log_warn, vha, 0xffff,
		       "%s: %8phC is being delete - not sending command.\n",
		       __func__, fcport->port_name);
		fcport->flags &= ~FCF_ASYNC_ACTIVE;
		return rval;
	}

	if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC online %d flags %x - not sending command.\n",
		    __func__, fcport->port_name, vha->flags.online, fcport->flags);
		goto done;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla24xx_async_gpdb_sp_done);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	/* Mailbox registers: loop ID + DMA address of the response buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	mbx = &sp->u.iocb_cmd;
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Re-queue through the work queue so the GPDB is retried. */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
/*
 * __qla24xx_handle_gpdb_event - finalize a successful port-database read.
 *
 * Under the session lock, bump the login generation and enable
 * logout-on-delete.  For a first-time login (and a non-reserved
 * address) the fcport count is incremented and rport registration is
 * scheduled; note the lock is dropped around qla24xx_sched_upd_fcport()
 * and re-acquired afterwards.  For an already-logged-in session (e.g. a
 * late RSCN forced revalidation) the discovery state simply returns to
 * LOGIN_COMPLETE.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		/* Drop the lock while scheduling rport registration. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
  1250. static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
  1251. struct port_database_24xx *pd)
  1252. {
  1253. int rc = 0;
  1254. if (pd->secure_login) {
  1255. ql_dbg(ql_dbg_disc, vha, 0x104d,
  1256. "Secure Login established on %8phC\n",
  1257. fcport->port_name);
  1258. fcport->flags |= FCF_FCSP_DEVICE;
  1259. } else {
  1260. ql_dbg(ql_dbg_disc, vha, 0x104d,
  1261. "non-Secure Login %8phC",
  1262. fcport->port_name);
  1263. fcport->flags &= ~FCF_FCSP_DEVICE;
  1264. }
  1265. if (vha->hw->flags.edif_enabled) {
  1266. if (fcport->flags & FCF_FCSP_DEVICE) {
  1267. qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
  1268. /* Start edif prli timer & ring doorbell for app */
  1269. fcport->edif.rx_sa_set = 0;
  1270. fcport->edif.tx_sa_set = 0;
  1271. fcport->edif.rx_sa_pending = 0;
  1272. fcport->edif.tx_sa_pending = 0;
  1273. qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
  1274. fcport->d_id.b24);
  1275. if (DBELL_ACTIVE(vha)) {
  1276. ql_dbg(ql_dbg_disc, vha, 0x20ef,
  1277. "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
  1278. __func__, __LINE__, fcport->port_name);
  1279. fcport->edif.app_sess_online = 1;
  1280. qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
  1281. fcport->d_id.b24, 0, fcport);
  1282. }
  1283. rc = 1;
  1284. } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
  1285. ql_dbg(ql_dbg_disc, vha, 0x2117,
  1286. "%s %d %8phC post prli\n",
  1287. __func__, __LINE__, fcport->port_name);
  1288. qla24xx_post_prli_work(vha, fcport);
  1289. rc = 1;
  1290. }
  1291. }
  1292. return rc;
  1293. }
/*
 * qla24xx_handle_gpdb_event - act on a completed Get Port Database read.
 *
 * Extracts the firmware login state from the returned port database
 * (upper nibble for NVMe targets, lower nibble for FCP) and routes the
 * port accordingly: PRLI complete -> parse the database and finish
 * login; PLOGI complete -> secure-login check, else fall through;
 * PLOGI/PRLI pending -> back to GNL for a relogin attempt; anything
 * else -> schedule session deletion.  Bails out early if the session is
 * being deleted or the login/RSCN generation changed while the command
 * was in flight.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
	    fcport->port_name, fcport->disc_state, pd->current_login_state,
	    fcport->fc4_type, ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND) {
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
		       __func__, __LINE__, fcport->port_name);
		return;
	}

	/* NVMe login state lives in the upper nibble, FCP in the lower. */
	if (NVME_TARGET(vha->hw, fcport))
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived meanwhile: replay it and start over. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
		       __func__, __LINE__, fcport->port_name, ls);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_COMPLETE:
		if (qla_chk_secure_login(vha, fcport, pd)) {
			ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
			       __func__, __LINE__, fcport->port_name, ls);
			return;
		}
		fallthrough;
	case PDS_PLOGI_PENDING:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
		       __func__, __LINE__, fcport->port_name, ls);
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
  1362. static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
  1363. {
  1364. u8 login = 0;
  1365. int rc;
  1366. ql_dbg(ql_dbg_disc, vha, 0x307b,
  1367. "%s %8phC DS %d LS %d lid %d retries=%d\n",
  1368. __func__, fcport->port_name, fcport->disc_state,
  1369. fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
  1370. if (qla_tgt_mode_enabled(vha))
  1371. return;
  1372. if (qla_dual_mode_enabled(vha)) {
  1373. if (N2N_TOPO(vha->hw)) {
  1374. u64 mywwn, wwn;
  1375. mywwn = wwn_to_u64(vha->port_name);
  1376. wwn = wwn_to_u64(fcport->port_name);
  1377. if (mywwn > wwn)
  1378. login = 1;
  1379. else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
  1380. && time_after_eq(jiffies,
  1381. fcport->plogi_nack_done_deadline))
  1382. login = 1;
  1383. } else {
  1384. login = 1;
  1385. }
  1386. } else {
  1387. /* initiator mode */
  1388. login = 1;
  1389. }
  1390. if (login && fcport->login_retry) {
  1391. fcport->login_retry--;
  1392. if (fcport->loop_id == FC_NO_LOOP_ID) {
  1393. fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
  1394. rc = qla2x00_find_new_loop_id(vha, fcport);
  1395. if (rc) {
  1396. ql_dbg(ql_dbg_disc, vha, 0x20e6,
  1397. "%s %d %8phC post del sess - out of loopid\n",
  1398. __func__, __LINE__, fcport->port_name);
  1399. fcport->scan_state = 0;
  1400. qlt_schedule_sess_for_deletion(fcport);
  1401. return;
  1402. }
  1403. }
  1404. ql_dbg(ql_dbg_disc, vha, 0x20bf,
  1405. "%s %d %8phC post login\n",
  1406. __func__, __LINE__, fcport->port_name);
  1407. qla2x00_post_async_login_work(vha, fcport, NULL);
  1408. }
  1409. }
/*
 * qla24xx_fcport_handle_login - drive the discovery/login state machine
 * for one fcport.
 *
 * Called whenever discovery progress is possible for @fcport.  After a
 * set of guard checks (port not found / being deleted, firmware login
 * in flight in dual mode, recent PLOGI NACK, pure target mode on
 * fabric, async op already outstanding) it dispatches on the current
 * disc_state and posts the next async step: GNNID/GNL/PLOGI for
 * DELETED, GPDB or PRLI for GNL, relogin or deletion for LOGIN_FAILED,
 * ADISC revalidation for LOGIN_COMPLETE, PRLI for LOGIN_PEND, and a
 * deferred ADISC for a slow UPD_FCPORT registration.
 *
 * Always returns 0.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state,
	    fcport->fc4_type);

	if (fcport->scan_state != QLA_FCPORT_FOUND ||
	    fcport->disc_state == DSC_DELETE_PEND)
		return 0;

	/* In dual mode, let an in-progress firmware login finish first. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	/* Honor the peer's PLOGI-NACK grace period (non-N2N). */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* Target won't initiate port login if fabric is present */
	if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
		return 0;

	if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
		/* An async op is already outstanding; retry via DPC later. */
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				/* Smaller adapter waits for the peer's PLOGI. */
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: query it by N_Port ID. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* 0x6 = firmware PRLI complete: fetch port database. */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    NVME_TARGET(vha->hw, fcport) ? "NVME" :
				    "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* Paused behind a conflicting session. */
				ql_dbg(ql_dbg_disc, vha, 0x20d8,
				    "%s %d %8phC exit\n",
				    __func__, __LINE__,
				    fcport->port_name);
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (vha->hw->flags.edif_enabled)
			break;

		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post %s PRLI\n",
			    __func__, __LINE__, fcport->port_name,
			    NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
			qla24xx_post_prli_work(vha, fcport);
		}
		break;

	case DSC_UPD_FCPORT:
		/* Warn (once a minute) when rport registration is slow. */
		sec = jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}
  1561. int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
  1562. u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
  1563. {
  1564. struct qla_work_evt *e;
  1565. e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
  1566. if (!e)
  1567. return QLA_FUNCTION_FAILED;
  1568. e->u.new_sess.id = *id;
  1569. e->u.new_sess.pla = pla;
  1570. e->u.new_sess.fc4_type = fc4_type;
  1571. memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
  1572. if (node_name)
  1573. memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
  1574. return qla2x00_post_work(vha, e);
  1575. }
/*
 * qla2x00_handle_rscn - react to an RSCN (Registered State Change
 * Notification) by flagging affected fcports for rescan and scheduling
 * the fabric scan worker.
 *
 * ea->id.b.rsvd_1 carries the RSCN address format (port/area/domain/
 * fabric), which selects how many bytes of the N_Port ID are compared.
 */
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport;
	unsigned long flags;

	switch (ea->id.b.rsvd_1) {
	case RSCN_PORT_ADDR:
		/* Single-port RSCN: look up the exact N_Port ID. */
		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
		if (fcport) {
			/* Online FCP2 (e.g. tape) devices: keep the session. */
			if (ql2xfc2target &&
			    fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				ql_dbg(ql_dbg_disc, vha, 0x2115,
				    "Delaying session delete for FCP2 portid=%06x %8phC ",
				    fcport->d_id.b24, fcport->port_name);
				return;
			}

			if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
				/*
				 * On ipsec start by remote port, Target port
				 * may use RSCN to trigger initiator to
				 * relogin. If driver is already in the
				 * process of a relogin, then ignore the RSCN
				 * and allow the current relogin to continue.
				 * This reduces thrashing of the connection.
				 */
				if (atomic_read(&fcport->state) == FCS_ONLINE) {
					/*
					 * If state = online, then set scan_needed=1 to do relogin.
					 * Otherwise we're already in the middle of a relogin
					 */
					fcport->scan_needed = 1;
					fcport->rscn_gen++;
				}
			} else {
				fcport->scan_needed = 1;
				fcport->rscn_gen++;
			}
		}
		break;
	case RSCN_AREA_ADDR:
		/* Area RSCN: match on domain+area (upper 16 bits of PID). */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				continue;

			if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
				fcport->scan_needed = 1;
				fcport->rscn_gen++;
			}
		}
		break;
	case RSCN_DOM_ADDR:
		/* Domain RSCN: match on domain only (upper 8 bits of PID). */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				continue;

			if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
				fcport->scan_needed = 1;
				fcport->rscn_gen++;
			}
		}
		break;
	case RSCN_FAB_ADDR:
	default:
		/* Fabric-wide RSCN: flag every port for rescan. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->flags & FCF_FCP2_DEVICE &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				continue;

			fcport->scan_needed = 1;
			fcport->rscn_gen++;
		}
		break;
	}

	/* Kick the scan worker unless one is already queued. */
	spin_lock_irqsave(&vha->work_lock, flags);
	if (vha->scan.scan_flags == 0) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);
}
/*
 * qla24xx_handle_relogin_event - re-drive login for a session. If RSCNs
 * arrived since the last lookup, refresh the session via GNL first;
 * otherwise step the login state machine directly.
 */
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
				  struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	/* Driver teardown in progress: start no new discovery work. */
	if (test_bit(UNLOADING, &vha->dpc_flags))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* Fabric state changed since last scan: requery via GNL. */
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gnl_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}
/*
 * qla_handle_els_plogi_done - continue session bring-up after an ELS
 * PLOGI completes successfully.
 */
void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
			       struct event_arg *ea)
{
	/*
	 * N2N + EDIF with the lower WWPN: fetch the port database first
	 * to learn whether the remote application supports Secure login.
	 */
	if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
	    vha->hw->flags.edif_enabled) {
		/* check to see if App support Secure */
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		return;
	}

	/* for pure Target Mode, PRLI will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2118,
	    "%s %d %8phC post PRLI\n",
	    __func__, __LINE__, ea->fcport->port_name);
	qla24xx_post_prli_work(vha, ea->fcport);
}
/*
 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
 * to be consumed by the fcport
 */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	/* A session that is being torn down must not be rescanned. */
	switch (fcport->disc_state) {
	case DSC_DELETE_PEND:
		return;
	default:
		break;
	}

	/* Re-inject the deferred RSCN as a single-port address event. */
	if (fcport->scan_needed) {
		memset(&ea, 0, sizeof(ea));
		ea.id = fcport->d_id;
		ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
		qla2x00_handle_rscn(fcport->vha, &ea);
	}
}
/*
 * qla2x00_tmf_iocb_timeout - timeout handler for TM and marker srbs.
 * Tries to abort the command; if no abort can be issued, manually pulls
 * the srb out of the outstanding-commands array and completes the
 * waiter with CS_TIMEOUT.
 */
static void
qla2x00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;
	int rc, h;
	unsigned long flags;

	/* Marker IOCBs have no abort path; fail them directly. */
	if (sp->type == SRB_MARKER)
		rc = QLA_FUNCTION_FAILED;
	else
		rc = qla24xx_async_abort_cmd(sp, false);

	if (rc) {
		/* Abort not possible: reap the handle ourselves. */
		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
			if (sp->qpair->req->outstanding_cmds[h] == sp) {
				sp->qpair->req->outstanding_cmds[h] = NULL;
				qla_put_fw_resources(sp->qpair, &sp->iores);
				break;
			}
		}
		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
		/* Wake the waiter with a timeout status. */
		tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
		tmf->u.tmf.data = QLA_FUNCTION_FAILED;
		complete(&tmf->u.tmf.comp);
	}
}
  1741. static void qla_marker_sp_done(srb_t *sp, int res)
  1742. {
  1743. struct srb_iocb *tmf = &sp->u.iocb_cmd;
  1744. if (res != QLA_SUCCESS)
  1745. ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
  1746. "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
  1747. sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
  1748. sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);
  1749. sp->u.iocb_cmd.u.tmf.data = res;
  1750. complete(&tmf->u.tmf.comp);
  1751. }
/*
 * START_SP_W_RETRIES - start an srb, retrying on EAGAIN up to 5 times
 * with a 1 ms sleep between attempts. Bails out with _rval = EINVAL if
 * a chip reset or fcport relogin happened since _chip_gen/_login_gen
 * were sampled, since the srb would then target a stale session.
 */
#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
{\
	int cnt = 5; \
	do { \
		if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
			_rval = EINVAL; \
			break; \
		} \
		_rval = qla2x00_start_sp(_sp); \
		if (_rval == EAGAIN) \
			msleep(1); \
		else \
			break; \
		cnt--; \
	} while (cnt); \
}
/**
 * qla26xx_marker: send marker IOCB and wait for the completion of it.
 * @arg: pointer to argument list.
 * It is assumed the caller will provide an fcport pointer and modifier
 */
static int
qla26xx_marker(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	fc_port_t *fcport = arg->fcport;
	u32 chip_gen, login_gen;

	if (TMF_NOT_READY(arg->fcport)) {
		ql_dbg(ql_dbg_taskm, vha, 0x8039,
		    "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
		    fcport->loop_id, fcport->d_id.b24,
		    arg->modifier, arg->lun, arg->qpair->id);
		return QLA_SUSPENDED;
	}

	/* Snapshot generations; a change aborts the send (stale session). */
	chip_gen = vha->hw->chip_reset;
	login_gen = fcport->login_gen;

	/* ref: INIT */
	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MARKER;
	sp->name = "marker";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;

	tm_iocb = &sp->u.iocb_cmd;
	init_completion(&tm_iocb->u.tmf.comp);
	tm_iocb->u.tmf.modifier = arg->modifier;
	tm_iocb->u.tmf.lun = arg->lun;
	tm_iocb->u.tmf.loop_id = fcport->loop_id;
	tm_iocb->u.tmf.vp_index = vha->vp_idx;

	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);

	ql_dbg(ql_dbg_taskm, vha, 0x8006,
	    "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24,
	    arg->modifier, arg->lun, sp->qpair->id, rval);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8031,
		    "Marker IOCB send failure (%x).\n", rval);
		goto done_free_sp;
	}

	/* Block until qla_marker_sp_done() or the timeout fires. */
	wait_for_completion(&tm_iocb->u.tmf.comp);
	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8019,
		    "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
		    sp->handle, fcport->loop_id, fcport->d_id.b24,
		    arg->modifier, arg->lun, sp->qpair->id, rval);
	}

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	return rval;
}
  1829. static void qla2x00_tmf_sp_done(srb_t *sp, int res)
  1830. {
  1831. struct srb_iocb *tmf = &sp->u.iocb_cmd;
  1832. if (res)
  1833. tmf->u.tmf.data = res;
  1834. complete(&tmf->u.tmf.comp);
  1835. }
  1836. static int qla_tmf_wait(struct tmf_arg *arg)
  1837. {
  1838. /* there are only 2 types of error handling that reaches here, lun or target reset */
  1839. if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
  1840. return qla2x00_eh_wait_for_pending_commands(arg->vha,
  1841. arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
  1842. else
  1843. return qla2x00_eh_wait_for_pending_commands(arg->vha,
  1844. arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
  1845. }
/*
 * __qla2x00_async_tm_cmd - issue a task-management IOCB, wait for its
 * completion, then drain affected commands and send a marker. The
 * caller must already hold a TMF slot (qla_get_tmf).
 */
static int
__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;
	fc_port_t *fcport = arg->fcport;
	u32 chip_gen, login_gen;
	u64 jif;

	if (TMF_NOT_READY(arg->fcport)) {
		ql_dbg(ql_dbg_taskm, vha, 0x8032,
		    "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
		    fcport->loop_id, fcport->d_id.b24,
		    arg->modifier, arg->lun, arg->qpair->id);
		return QLA_SUSPENDED;
	}

	/* Snapshot generations to detect chip-reset / relogin races. */
	chip_gen = vha->hw->chip_reset;
	login_gen = fcport->login_gen;

	/* ref: INIT */
	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	qla_vha_mark_busy(vha);
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
			      qla2x00_tmf_sp_done);
	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;

	tm_iocb = &sp->u.iocb_cmd;
	init_completion(&tm_iocb->u.tmf.comp);
	tm_iocb->u.tmf.flags = arg->flags;
	tm_iocb->u.tmf.lun = arg->lun;

	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24,
	    arg->flags, arg->lun, sp->qpair->id, rval);

	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	/* Block until qla2x00_tmf_sp_done() or the timeout fires. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		jif = jiffies;
		/* Wait for commands on this nexus to drain post-TMF. */
		if (qla_tmf_wait(arg)) {
			ql_log(ql_log_info, vha, 0x803e,
			    "Waited %u ms Nexus=%ld:%06x:%llu.\n",
			    jiffies_to_msecs(jiffies - jif), vha->host_no,
			    fcport->d_id.b24, arg->lun);
		}

		/* Send the marker only if no disruption occurred meanwhile. */
		if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
			rval = qla26xx_marker(arg);
		} else {
			ql_log(ql_log_info, vha, 0x803e,
			    "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
			    vha->host_no, fcport->d_id.b24, arg->lun);
			rval = QLA_FUNCTION_FAILED;
		}
	}

	/* A recorded TM failure status takes precedence over marker rval. */
	if (tm_iocb->u.tmf.data)
		rval = tm_iocb->u.tmf.data;

done_free_sp:
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	return rval;
}
  1917. static void qla_put_tmf(struct tmf_arg *arg)
  1918. {
  1919. struct scsi_qla_host *vha = arg->vha;
  1920. struct qla_hw_data *ha = vha->hw;
  1921. unsigned long flags;
  1922. spin_lock_irqsave(&ha->tgt.sess_lock, flags);
  1923. ha->active_tmf--;
  1924. list_del(&arg->tmf_elem);
  1925. spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
  1926. }
/*
 * qla_get_tmf - acquire a task-management slot for this nexus.
 * Rejects a duplicate TMF to the same fcport/LUN with -EINVAL, then
 * polls (dropping the session lock around msleep) until a slot is free
 * and this request is first in the pending queue, or the port becomes
 * unavailable (EIO). Returns 0 on success; release with qla_put_tmf().
 */
static
int qla_get_tmf(struct tmf_arg *arg)
{
	struct scsi_qla_host *vha = arg->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	fc_port_t *fcport = arg->fcport;
	int rc = 0;
	struct tmf_arg *t;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
		if (t->fcport == arg->fcport && t->lun == arg->lun) {
			/* reject duplicate TMF */
			ql_log(ql_log_warn, vha, 0x802c,
			    "found duplicate TMF. Nexus=%ld:%06x:%llu.\n",
			    vha->host_no, fcport->d_id.b24, arg->lun);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return -EINVAL;
		}
	}

	list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
	/* Poll for a free slot; the lock is dropped around the sleep. */
	while (ha->active_tmf >= MAX_ACTIVE_TMF) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		msleep(1);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (TMF_NOT_READY(fcport)) {
			ql_log(ql_log_warn, vha, 0x802c,
			    "Unable to acquire TM resource due to disruption.\n");
			rc = EIO;
			break;
		}
		/* FIFO fairness: proceed only when first in the queue. */
		if (ha->active_tmf < MAX_ACTIVE_TMF &&
		    list_is_first(&arg->tmf_elem, &ha->tmf_pending))
			break;
	}

	list_del(&arg->tmf_elem);

	if (!rc) {
		ha->active_tmf++;
		list_add_tail(&arg->tmf_elem, &ha->tmf_active);
	}

	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return rc;
}
  1970. int
  1971. qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
  1972. uint32_t tag)
  1973. {
  1974. struct scsi_qla_host *vha = fcport->vha;
  1975. struct tmf_arg a;
  1976. int rval = QLA_SUCCESS;
  1977. if (TMF_NOT_READY(fcport))
  1978. return QLA_SUSPENDED;
  1979. a.vha = fcport->vha;
  1980. a.fcport = fcport;
  1981. a.lun = lun;
  1982. a.flags = flags;
  1983. INIT_LIST_HEAD(&a.tmf_elem);
  1984. if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
  1985. a.modifier = MK_SYNC_ID_LUN;
  1986. } else {
  1987. a.modifier = MK_SYNC_ID;
  1988. }
  1989. if (qla_get_tmf(&a))
  1990. return QLA_FUNCTION_FAILED;
  1991. a.qpair = vha->hw->base_qpair;
  1992. rval = __qla2x00_async_tm_cmd(&a);
  1993. qla_put_tmf(&a);
  1994. return rval;
  1995. }
/*
 * qla24xx_async_abort_command - abort an outstanding srb.
 * Locates the command in the qpair's outstanding-commands array;
 * returns QLA_ERR_NOT_FOUND if it has already completed. FX00
 * discovery commands are aborted via the FXDISC ioctl path instead of
 * an abort IOCB.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long flags = 0;

	uint32_t handle;
	fc_port_t *fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_ERR_NOT_FOUND;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}
/*
 * qla24xx_handle_prli_done_event - process async PRLI completion.
 * Success: record NVMe PRLI parameters and fetch the port database.
 * Failure: flip which FC4 type to try next (NVMe <-> FCP), then either
 * reset the N2N link or tear the session down for fabric relogin.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	struct srb *sp;

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		/* First-burst size is carried in 512-byte units. */
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		sp = ea->sp;
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC priority %s, fc4type %x prev try %s\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
		    "FCP" : "NVMe", ea->fcport->fc4_type,
		    (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
		    "NVME" : "FCP");

		/* Dual-protocol target: alternate protocol on next PRLI. */
		if (NVME_FCP_TARGET(ea->fcport)) {
			if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
				ea->fcport->do_prli_nvme = 0;
			else
				ea->fcport->do_prli_nvme = 1;
		} else {
			ea->fcport->do_prli_nvme = 0;
		}

		if (N2N_TOPO(vha->hw)) {
			if (ea->fcport->n2n_link_reset_cnt ==
			    vha->hw->login_retry_count &&
			    ea->fcport->flags & FCF_FCSP_DEVICE) {
				/* remote authentication app just started */
				ea->fcport->n2n_link_reset_cnt = 0;
			}

			if (ea->fcport->n2n_link_reset_cnt <
			    vha->hw->login_retry_count) {
				ea->fcport->n2n_link_reset_cnt++;
				vha->relogin_jif = jiffies + 2 * HZ;
				/*
				 * PRLI failed. Reset link to kick start
				 * state machine
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__,
				    ea->fcport->port_name);
			}
		} else {
			/*
			 * switch connect. login failed. Take connection down
			 * and allow relogin to retrigger
			 */
			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}
/*
 * qla24xx_handle_plogi_done_event - process completion of an async
 * PLOGI. On success continue with PRLI or GPDB; on failure resolve
 * loop-id / nport-id conflicts or tear the session down for relogin.
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	/* Firmware already has a login in flight from the remote side. */
	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	/* Session is being (or has been) deleted: retry login later. */
	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN raced the PLOGI: replay it and restart discovery. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
		  ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (vha->hw->flags.edif_enabled) {
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		} else {
			if (NVME_TARGET(vha->hw, fcport)) {
				ql_dbg(ql_dbg_disc, vha, 0x2117,
				    "%s %d %8phC post prli\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_prli_work(vha, fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ea,
				    "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
				    __func__, __LINE__, fcport->port_name,
				    fcport->loop_id, fcport->d_id.b24);

				set_bit(fcport->loop_id, vha->hw->loop_id_map);
				spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
				fcport->chip_reset = vha->hw->base_qpair->chip_reset;
				fcport->logout_on_delete = 1;
				fcport->send_els_logo = 0;
				fcport->fw_login_state = DSC_LS_PRLI_COMP;
				spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

				qla24xx_post_gpdb_work(vha, fcport, 0);
			}
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
		qlt_schedule_sess_for_deletion(ea->fcport);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area = (ea->iop[1] >> 8) & 0xff;
		cid.b.al_pa = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Pick a new loop id via GNL; mark the old one as taken. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			/* Take over the conflicting loop id for ourselves. */
			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			ea->fcport->logout_on_delete = 1;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}
  2227. /****************************************************************************/
  2228. /* QLogic ISP2x00 Hardware Support Functions. */
  2229. /****************************************************************************/
/*
 * qla83xx_nic_core_fw_load - first-load IDC handshake for ISP83xx.
 * Under the IDC lock: announce driver presence, decide reset ownership,
 * negotiate the IDC major/minor version, and (as init owner) move the
 * device to READY before running the IDC state handler.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		/* Init owner with a valid port config moves device to READY. */
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);
	return rval;
}
/*
 * qla2x00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	/* Check for secure flash support */
	if (IS_QLA28XX(ha)) {
		if (rd_reg_word(&reg->mailbox12) & BIT_0)
			ha->flags.secure_adapter = 1;
		ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
		    (ha->flags.secure_adapter) ? "Yes" : "No");
	}

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * should honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	/* Let priority default to FCP, can be overridden by nvram_config */
	ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ha->isp_ops->nvram_config(vha);

	/* Sanitize whatever nvram_config chose for FC4 priority. */
	if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
	    ha->fc4_type_priority != FC4_PRIORITY_NVME)
		ha->fc4_type_priority = FC4_PRIORITY_FCP;

	ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
	    ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* If smartsan enabled then require fdmi and rdp enabled */
	if (ql2xsmartsan) {
		ql2xfdmienable = 1;
		ql2xrdpenable = 1;
	}

	/* No usable firmware resident: run diagnostics and load it. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/* Initiator/dual mode: bring up request/response rings in fw. */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	/* No point in continuing if firmware initialization failed. */
	if (rval != QLA_SUCCESS)
		return rval;

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
/**
 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2100_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity-error and SERR reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 * @vha: HA context
 *
 * Configures PCI command bits, applies the 2300 MWI erratum
 * workaround (detected via the frame-buffer revision register) and
 * snapshots the bus attributes.
 *
 * Returns 0 on success.
 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Report PCI parity errors and assert SERR on fatal errors. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	/* 2322/6322: make sure legacy INTx delivery is not disabled. */
	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC so FPM registers can be accessed safely. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);		/* PCI posting flush. */

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* A true 2300 cannot use Memory-Write-and-Invalidate. */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x0);
		rd_reg_word(&reg->ctrl_status);		/* PCI posting flush. */

		/* Release RISC module. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_word(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting; ensure INTx is not disabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting; ensure INTx is not disabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
  2576. /**
  2577. * qla2x00_isp_firmware() - Choose firmware image.
  2578. * @vha: HA context
  2579. *
  2580. * Returns 0 on success.
  2581. */
  2582. static int
  2583. qla2x00_isp_firmware(scsi_qla_host_t *vha)
  2584. {
  2585. int rval;
  2586. uint16_t loop_id, topo, sw_cap;
  2587. uint8_t domain, area, al_pa;
  2588. struct qla_hw_data *ha = vha->hw;
  2589. /* Assume loading risc code */
  2590. rval = QLA_FUNCTION_FAILED;
  2591. if (ha->flags.disable_risc_code_load) {
  2592. ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
  2593. /* Verify checksum of loaded RISC code. */
  2594. rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
  2595. if (rval == QLA_SUCCESS) {
  2596. /* And, verify we are not in ROM code. */
  2597. rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
  2598. &area, &domain, &topo, &sw_cap);
  2599. }
  2600. }
  2601. if (rval)
  2602. ql_dbg(ql_dbg_init, vha, 0x007a,
  2603. "**** Load RISC code ****.\n");
  2604. return (rval);
  2605. }
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Disables interrupts and bus-mastering, resets the FPM, frame
 * buffer FIFOs and RISC processor, then soft-resets the ISP and
 * waits for it to come out of reset before re-enabling mastering.
 *
 * Returns 0 on success.
 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;
	int rval = QLA_FUNCTION_FAILED;

	/* Nothing to do if the PCI channel is gone. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable so the chip cannot DMA during reset. */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			/* Poll up to 30000 * 100us for the pause to latch. */
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((rd_reg_word(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			rd_reg_word(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		wrt_reg_word(&reg->ctrl_status, 0x20);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		wrt_reg_word(&reg->fpm_diag_config, 0x100);
		rd_reg_word(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			wrt_reg_word(&reg->fpm_diag_config, 0x0);
			rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		wrt_reg_word(&reg->ctrl_status, 0x10);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		wrt_reg_word(&reg->ctrl_status, 0);
		rd_reg_word(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
		rd_reg_word(&reg->hccr);		/* PCI Posting. */
	}

	/* Clear any latched RISC/host interrupts before the soft reset. */
	wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
	wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to for a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((rd_reg_word(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);

	wrt_reg_word(&reg->semaphore, 0);

	/* Release RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);		/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;
			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		rd_reg_word(&reg->hccr);	/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
  2726. /**
  2727. * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
  2728. * @vha: HA context
  2729. *
  2730. * Returns 0 on success.
  2731. */
  2732. static int
  2733. qla81xx_reset_mpi(scsi_qla_host_t *vha)
  2734. {
  2735. uint16_t mb[4] = {0x1010, 0, 1, 0};
  2736. if (!IS_QLA81XX(vha->hw))
  2737. return QLA_SUCCESS;
  2738. return qla81xx_write_mpi_register(vha, mb);
  2739. }
  2740. static int
  2741. qla_chk_risc_recovery(scsi_qla_host_t *vha)
  2742. {
  2743. struct qla_hw_data *ha = vha->hw;
  2744. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  2745. __le16 __iomem *mbptr = &reg->mailbox0;
  2746. int i;
  2747. u16 mb[32];
  2748. int rc = QLA_SUCCESS;
  2749. if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  2750. return rc;
  2751. /* this check is only valid after RISC reset */
  2752. mb[0] = rd_reg_word(mbptr);
  2753. mbptr++;
  2754. if (mb[0] == 0xf) {
  2755. rc = QLA_FUNCTION_FAILED;
  2756. for (i = 1; i < 32; i++) {
  2757. mb[i] = rd_reg_word(mbptr);
  2758. mbptr++;
  2759. }
  2760. ql_log(ql_log_warn, vha, 0x1015,
  2761. "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
  2762. mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
  2763. ql_log(ql_log_warn, vha, 0x1015,
  2764. "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
  2765. mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
  2766. mb[15]);
  2767. ql_log(ql_log_warn, vha, 0x1015,
  2768. "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
  2769. mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
  2770. mb[23]);
  2771. ql_log(ql_log_warn, vha, 0x1015,
  2772. "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
  2773. mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
  2774. mb[31]);
  2775. }
  2776. return rc;
  2777. }
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Shuts down DMA, issues an ISP soft reset, waits for the firmware
 * to finish NVRAM access and for mailbox0 to clear, and (if flagged)
 * resets the MPI firmware. Records progress in fw_dump_cap_flags for
 * later dump analysis.
 *
 * Returns 0 on success.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;
	int print = 1;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC: first quiesce DMA, then wait for it to go idle. */
	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status),
	    (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	/* Issue the soft reset with DMA still shut down. */
	wrt_reg_dword(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/*
	 * NOTE(review): the config-space read appears to serve only to
	 * flush the posted MMIO write above -- confirm before removing.
	 */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	rd_reg_word(&reg->mailbox0);
	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	rd_reg_dword(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((rd_reg_dword(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_dword(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				/* Retry: request another ISP abort pass. */
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Pulse RISC reset, release pause, then clear the reset. */
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);

	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
	mdelay(10);
	rd_reg_dword(&reg->hccr);

	/* Wait (up to ~300 ms) for firmware to signal readiness in mb0. */
	wd = rd_reg_word(&reg->mailbox0);
	for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt) {
			mdelay(1);
			if (print && qla_chk_risc_recovery(vha))
				print = 0;	/* log the failure only once */

			wd = rd_reg_word(&reg->mailbox0);
		} else {
			rval = QLA_FUNCTION_TIMEOUT;

			ql_log(ql_log_warn, vha, 0x015e,
			    "RISC reset timeout\n");
		}
	}

	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    rd_reg_dword(&reg->hccr),
	    rd_reg_word(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
/* Read the RISC semaphore register through the iobase window. */
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Select the RISC register bank, then read via the window. */
	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}
/* Write the RISC semaphore register through the iobase window. */
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	/* Select the RISC register bank, then write via the window. */
	wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
/*
 * qla25xx_manipulate_risc_semaphore() - Acquire the RISC semaphore
 * before resetting the RISC.
 *
 * Applies only to subsystem devices 0x0175/0x0240 (presumably a
 * board-specific workaround -- TODO confirm against the errata).
 * Tries to take the semaphore cooperatively; after a total timeout it
 * force-sets ownership.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	/* Pause the RISC while manipulating the semaphore. */
	wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	/* Poll until the semaphore bit reads back as set. */
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* A previous owner force-set it: clear, wait for release, retry. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	/* Could not acquire cooperatively -- force ownership. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
  2960. /**
  2961. * qla24xx_reset_chip() - Reset ISP24xx chip.
  2962. * @vha: HA context
  2963. *
  2964. * Returns 0 on success.
  2965. */
  2966. int
  2967. qla24xx_reset_chip(scsi_qla_host_t *vha)
  2968. {
  2969. struct qla_hw_data *ha = vha->hw;
  2970. int rval = QLA_FUNCTION_FAILED;
  2971. if (pci_channel_offline(ha->pdev) &&
  2972. ha->flags.pci_channel_io_perm_failure) {
  2973. return rval;
  2974. }
  2975. ha->isp_ops->disable_intrs(ha);
  2976. qla25xx_manipulate_risc_semaphore(vha);
  2977. /* Perform RISC reset. */
  2978. rval = qla24xx_reset_risc(vha);
  2979. return rval;
  2980. }
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Soft-resets the ISP, verifies the product ID reported in the
 * mailbox registers, sizes the firmware transfer buffer and runs the
 * mailbox register wrap test.
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	/* Poll for the soft-reset bit to clear. */
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = rd_reg_word(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to leave the BUSY state. */
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size (capped at 1024 request entries). */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
  3083. /**
  3084. * qla24xx_chip_diag() - Test ISP24xx for proper operation.
  3085. * @vha: HA context
  3086. *
  3087. * Returns 0 on success.
  3088. */
  3089. int
  3090. qla24xx_chip_diag(scsi_qla_host_t *vha)
  3091. {
  3092. int rval;
  3093. struct qla_hw_data *ha = vha->hw;
  3094. struct req_que *req = ha->req_q_map[0];
  3095. if (IS_P3P_TYPE(ha))
  3096. return QLA_SUCCESS;
  3097. ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
  3098. rval = qla2x00_mbx_reg_test(vha);
  3099. if (rval) {
  3100. ql_log(ql_log_warn, vha, 0x0082,
  3101. "Failed mailbox send register test.\n");
  3102. } else {
  3103. /* Flag a successful rval */
  3104. rval = QLA_SUCCESS;
  3105. }
  3106. return rval;
  3107. }
  3108. static void
  3109. qla2x00_init_fce_trace(scsi_qla_host_t *vha)
  3110. {
  3111. int rval;
  3112. dma_addr_t tc_dma;
  3113. void *tc;
  3114. struct qla_hw_data *ha = vha->hw;
  3115. if (!IS_FWI2_CAPABLE(ha))
  3116. return;
  3117. if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
  3118. !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
  3119. return;
  3120. if (ha->fce) {
  3121. ql_dbg(ql_dbg_init, vha, 0x00bd,
  3122. "%s: FCE Mem is already allocated.\n",
  3123. __func__);
  3124. return;
  3125. }
  3126. /* Allocate memory for Fibre Channel Event Buffer. */
  3127. tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
  3128. GFP_KERNEL);
  3129. if (!tc) {
  3130. ql_log(ql_log_warn, vha, 0x00be,
  3131. "Unable to allocate (%d KB) for FCE.\n",
  3132. FCE_SIZE / 1024);
  3133. return;
  3134. }
  3135. rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
  3136. ha->fce_mb, &ha->fce_bufs);
  3137. if (rval) {
  3138. ql_log(ql_log_warn, vha, 0x00bf,
  3139. "Unable to initialize FCE (%d).\n", rval);
  3140. dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
  3141. return;
  3142. }
  3143. ql_dbg(ql_dbg_init, vha, 0x00c0,
  3144. "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
  3145. ha->flags.fce_enabled = 1;
  3146. ha->fce_dma = tc_dma;
  3147. ha->fce = tc;
  3148. }
  3149. static void
  3150. qla2x00_init_eft_trace(scsi_qla_host_t *vha)
  3151. {
  3152. int rval;
  3153. dma_addr_t tc_dma;
  3154. void *tc;
  3155. struct qla_hw_data *ha = vha->hw;
  3156. if (!IS_FWI2_CAPABLE(ha))
  3157. return;
  3158. if (ha->eft) {
  3159. ql_dbg(ql_dbg_init, vha, 0x00bd,
  3160. "%s: EFT Mem is already allocated.\n",
  3161. __func__);
  3162. return;
  3163. }
  3164. /* Allocate memory for Extended Trace Buffer. */
  3165. tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
  3166. GFP_KERNEL);
  3167. if (!tc) {
  3168. ql_log(ql_log_warn, vha, 0x00c1,
  3169. "Unable to allocate (%d KB) for EFT.\n",
  3170. EFT_SIZE / 1024);
  3171. return;
  3172. }
  3173. rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
  3174. if (rval) {
  3175. ql_log(ql_log_warn, vha, 0x00c2,
  3176. "Unable to initialize EFT (%d).\n", rval);
  3177. dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
  3178. return;
  3179. }
  3180. ql_dbg(ql_dbg_init, vha, 0x00c3,
  3181. "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
  3182. ha->eft_dma = tc_dma;
  3183. ha->eft = tc;
  3184. }
/* Set up the optional firmware trace buffers (FCE and EFT). */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	qla2x00_init_fce_trace(vha);
	qla2x00_init_eft_trace(vha);
}
/*
 * qla2x00_alloc_fw_dump() - Size and allocate the firmware dump buffer.
 *
 * Computes the per-chip-generation dump size (fixed registers, RISC
 * memory, request/response queues, multiqueue/ATIO chains, FCE/EFT
 * trace regions or, for 27xx/28xx, the fwdt template sizes) and
 * (re)allocates ha->fw_dump when the current buffer is too small.
 */
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
	    eft_size, fce_size, mq_size;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	struct qla2xxx_fw_dump *fw_dump;

	if (ha->fw_dump) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "Firmware dump already allocated.\n");
		return;
	}

	ha->fw_dumped = 0;
	ha->fw_dump_cap_flags = 0;
	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
	req_q_size = rsp_q_size = 0;

	/* Per-generation fixed-region and RISC memory sizing. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		fixed_size = sizeof(struct qla2100_fw_dump);
	} else if (IS_QLA23XX(ha)) {
		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
		    sizeof(uint16_t);
	} else if (IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA83XX(ha))
			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
		else if (IS_QLA81XX(ha))
			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
		else if (IS_QLA25XX(ha))
			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
		else
			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);

		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
		    sizeof(uint32_t);
		if (ha->mqenable) {
			if (!IS_QLA83XX(ha))
				mq_size = sizeof(struct qla2xxx_mq_chain);
			/*
			 * Allocate maximum buffer size for all queues - Q0.
			 * Resizing must be done at end-of-dump processing.
			 */
			mq_size += (ha->max_req_queues - 1) *
			    (req->length * sizeof(request_t));
			mq_size += (ha->max_rsp_queues - 1) *
			    (rsp->length * sizeof(response_t));
		}
		if (ha->tgt.atio_ring)
			mq_size += ha->tgt.atio_q_length * sizeof(request_t);

		/* Trace buffers are included in the dump only if enabled. */
		qla2x00_init_fce_trace(vha);
		if (ha->fce)
			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
		qla2x00_init_eft_trace(vha);
		if (ha->eft)
			eft_size = EFT_SIZE;
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		/* 27xx/28xx dump size comes from the fwdt templates. */
		struct fwdt *fwdt = ha->fwdt;
		uint j;

		for (j = 0; j < 2; j++, fwdt++) {
			if (!fwdt->template) {
				ql_dbg(ql_dbg_init, vha, 0x00ba,
				    "-> fwdt%u no template\n", j);
				continue;
			}
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculating fwdump size...\n", j);
			fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
			    vha, fwdt->template);
			ql_dbg(ql_dbg_init, vha, 0x00fa,
			    "-> fwdt%u calculated fwdump size = %#lx bytes\n",
			    j, fwdt->dump_size);
			dump_size += fwdt->dump_size;
		}
		/* Add space for spare MPI fw dump. */
		dump_size += ha->fwdt[1].dump_size;
	} else {
		req_q_size = req->length * sizeof(request_t);
		rsp_q_size = rsp->length * sizeof(response_t);
		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
		    + eft_size;
		/* Chained regions (MQ/FCE/offload) start here. */
		ha->chain_offset = dump_size;
		dump_size += mq_size + fce_size;

		if (ha->exchoffld_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
			    ha->exchoffld_size;
		if (ha->exlogin_buf)
			dump_size += sizeof(struct qla2xxx_offld_chain) +
			    ha->exlogin_size;
	}

	/* (Re)allocate only if no buffer exists or it is too small. */
	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
		ql_dbg(ql_dbg_init, vha, 0x00c5,
		    "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
		    __func__, dump_size, ha->fw_dump_len,
		    ha->fw_dump_alloc_len);

		fw_dump = vmalloc(dump_size);
		if (!fw_dump) {
			ql_log(ql_log_warn, vha, 0x00c4,
			    "Unable to allocate (%d KB) for firmware dump.\n",
			    dump_size / 1024);
		} else {
			mutex_lock(&ha->optrom_mutex);
			if (ha->fw_dumped) {
				/* Preserve an already-captured dump. */
				memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;
				ha->fw_dump_alloc_len = dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Re-Allocated (%d KB) and save firmware dump.\n",
				    dump_size / 1024);
			} else {
				vfree(ha->fw_dump);
				ha->fw_dump = fw_dump;

				ha->fw_dump_len = ha->fw_dump_alloc_len =
				    dump_size;
				ql_dbg(ql_dbg_init, vha, 0x00c5,
				    "Allocated (%d KB) for firmware dump.\n",
				    dump_size / 1024);

				if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
					/*
					 * 27xx/28xx use template-driven dumps;
					 * no legacy header to initialize.
					 */
					ha->mpi_fw_dump = (char *)fw_dump +
					    ha->fwdt[1].dump_size;
					mutex_unlock(&ha->optrom_mutex);
					return;
				}

				/* Initialize the legacy dump header. */
				ha->fw_dump->signature[0] = 'Q';
				ha->fw_dump->signature[1] = 'L';
				ha->fw_dump->signature[2] = 'G';
				ha->fw_dump->signature[3] = 'C';
				ha->fw_dump->version = htonl(1);

				ha->fw_dump->fixed_size = htonl(fixed_size);
				ha->fw_dump->mem_size = htonl(mem_size);
				ha->fw_dump->req_q_size = htonl(req_q_size);
				ha->fw_dump->rsp_q_size = htonl(rsp_q_size);

				ha->fw_dump->eft_size = htonl(eft_size);
				ha->fw_dump->eft_addr_l =
				    htonl(LSD(ha->eft_dma));
				ha->fw_dump->eft_addr_h =
				    htonl(MSD(ha->eft_dma));

				ha->fw_dump->header_size =
				    htonl(offsetof
					(struct qla2xxx_fw_dump, isp));
			}
			mutex_unlock(&ha->optrom_mutex);
		}
	}
}
/*
 * qla81xx_mpi_sync() - Synchronize the MPS field between the PCI config
 * space and RISC RAM on ISP81xx adapters.
 * @vha: HA context
 *
 * Reads the MPS bits (mask 0xe0) from PCI config offset 0x54 and, if they
 * differ from the copy held in RISC RAM word 0x7a15, rewrites that word so
 * both agree.  Access to the RAM word is guarded by the semaphore at RISC
 * RAM word 0x7c00 (write 1 to acquire, 0 to release).
 *
 * Returns QLA_SUCCESS, or the failing mailbox status.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
	int rval;
	uint16_t dc;
	uint32_t dw;

	/* Only ISP81xx requires this synchronization. */
	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the semaphore guarding the shared RAM word. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	dc &= MPS_MASK;
	/* Already in sync -- nothing to write back. */
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Replace the MPS bits in the RAM word with the PCI-config value. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
  3377. int
  3378. qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
  3379. {
  3380. /* Don't try to reallocate the array */
  3381. if (req->outstanding_cmds)
  3382. return QLA_SUCCESS;
  3383. if (!IS_FWI2_CAPABLE(ha))
  3384. req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
  3385. else {
  3386. if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
  3387. req->num_outstanding_cmds = ha->cur_fw_xcb_count;
  3388. else
  3389. req->num_outstanding_cmds = ha->cur_fw_iocb_count;
  3390. }
  3391. req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
  3392. sizeof(srb_t *),
  3393. GFP_KERNEL);
  3394. if (!req->outstanding_cmds) {
  3395. /*
  3396. * Try to allocate a minimal size just so we can get through
  3397. * initialization.
  3398. */
  3399. req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
  3400. req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
  3401. sizeof(srb_t *),
  3402. GFP_KERNEL);
  3403. if (!req->outstanding_cmds) {
  3404. ql_log(ql_log_fatal, NULL, 0x0126,
  3405. "Failed to allocate memory for "
  3406. "outstanding_cmds for req_que %p.\n", req);
  3407. req->num_outstanding_cmds = 0;
  3408. return QLA_FUNCTION_FAILED;
  3409. }
  3410. }
  3411. return QLA_SUCCESS;
  3412. }
/*
 * PRINT_FIELD() - append @_str to the output buffer when bit @_flag is set
 * in a0->@_field, separating successive entries with '|'.
 *
 * Not hygienic: relies on locals declared by the caller
 * (qla2xxx_print_sfp_info) --
 *   a0       - decoded SFP ID page,
 *   ptr      - current write position in the buffer,
 *   leftover - remaining buffer space passed to snprintf,
 *   len      - byte count returned by the last snprintf,
 *   p        - set once anything has been printed (drives the '|' prefix).
 */
#define PRINT_FIELD(_field, _flag, _str) { \
	if (a0->_field & _flag) {\
		if (p) {\
			strcat(ptr, "|");\
			ptr++;\
			leftover--;\
		} \
		len = snprintf(ptr, leftover, "%s", _str); \
		p = 1;\
		leftover -= len;\
		ptr += len; \
	} \
}
  3426. static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
  3427. {
  3428. #define STR_LEN 64
  3429. struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
  3430. u8 str[STR_LEN], *ptr, p;
  3431. int leftover, len;
  3432. memset(str, 0, STR_LEN);
  3433. snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
  3434. ql_dbg(ql_dbg_init, vha, 0x015a,
  3435. "SFP MFG Name: %s\n", str);
  3436. memset(str, 0, STR_LEN);
  3437. snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
  3438. ql_dbg(ql_dbg_init, vha, 0x015c,
  3439. "SFP Part Name: %s\n", str);
  3440. /* media */
  3441. memset(str, 0, STR_LEN);
  3442. ptr = str;
  3443. leftover = STR_LEN;
  3444. p = len = 0;
  3445. PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
  3446. PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
  3447. PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
  3448. PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
  3449. PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
  3450. PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
  3451. PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
  3452. ql_dbg(ql_dbg_init, vha, 0x0160,
  3453. "SFP Media: %s\n", str);
  3454. /* link length */
  3455. memset(str, 0, STR_LEN);
  3456. ptr = str;
  3457. leftover = STR_LEN;
  3458. p = len = 0;
  3459. PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
  3460. PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
  3461. PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
  3462. PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
  3463. PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
  3464. ql_dbg(ql_dbg_init, vha, 0x0196,
  3465. "SFP Link Length: %s\n", str);
  3466. memset(str, 0, STR_LEN);
  3467. ptr = str;
  3468. leftover = STR_LEN;
  3469. p = len = 0;
  3470. PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
  3471. PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
  3472. PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
  3473. PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
  3474. PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
  3475. ql_dbg(ql_dbg_init, vha, 0x016e,
  3476. "SFP FC Link Tech: %s\n", str);
  3477. if (a0->length_km)
  3478. ql_dbg(ql_dbg_init, vha, 0x016f,
  3479. "SFP Distant: %d km\n", a0->length_km);
  3480. if (a0->length_100m)
  3481. ql_dbg(ql_dbg_init, vha, 0x0170,
  3482. "SFP Distant: %d m\n", a0->length_100m*100);
  3483. if (a0->length_50um_10m)
  3484. ql_dbg(ql_dbg_init, vha, 0x0189,
  3485. "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
  3486. if (a0->length_62um_10m)
  3487. ql_dbg(ql_dbg_init, vha, 0x018a,
  3488. "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
  3489. if (a0->length_om4_10m)
  3490. ql_dbg(ql_dbg_init, vha, 0x0194,
  3491. "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
  3492. if (a0->length_om3_10m)
  3493. ql_dbg(ql_dbg_init, vha, 0x0195,
  3494. "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
  3495. }
/**
 * qla24xx_detect_sfp() - Determine short/long-range setting for the link.
 *
 * @vha: adapter state pointer.
 *
 * Seeds the long-range flag and distance from NVRAM enhanced features,
 * then -- when buffer-plus mode is enabled -- overrides them with what the
 * SFP transceiver reports in its ID page.
 *
 * Return:
 * 0 -- Configure firmware to use short-range settings -- normal
 *      buffer-to-buffer credits.
 *
 * 1 -- Configure firmware to use long-range settings -- extra
 *      buffer-to-buffer credits should be allocated with
 *      ha->lr_distance containing distance settings from NVRAM or SFP
 *      (if supported).
 */
int
qla24xx_detect_sfp(scsi_qla_host_t *vha)
{
	int rc, used_nvram;
	struct sff_8247_a0 *a;
	struct qla_hw_data *ha = vha->hw;
	struct nvram_81xx *nv = ha->nvram;
#define LR_DISTANCE_UNKNOWN 2
	static const char * const types[] = { "Short", "Long" };
	static const char * const lengths[] = { "(10km)", "(5km)", "" };
	u8 ll = 0;

	/* Seed with NVRAM settings. */
	used_nvram = 0;
	ha->flags.lr_detected = 0;
	if (IS_BPM_RANGE_CAPABLE(ha) &&
	    (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
		used_nvram = 1;
		ha->flags.lr_detected = 1;
		/* Distance code is packed into the enhanced-features word. */
		ha->lr_distance =
		    (nv->enhanced_features >> LR_DIST_NV_POS)
		    & LR_DIST_NV_MASK;
	}

	if (!IS_BPM_ENABLED(vha))
		goto out;

	/* Determine SR/LR capabilities of SFP/Transceiver. */
	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
	if (rc)
		goto out;

	/* SFP read succeeded: its data overrides the NVRAM seed. */
	used_nvram = 0;
	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
	qla2xxx_print_sfp_info(vha);

	ha->flags.lr_detected = 0;
	ll = a->fc_ll_cc7;
	if (ll & FC_LL_VL || ll & FC_LL_L) {
		/* Long range, track length. */
		ha->flags.lr_detected = 1;

		/* >5 km (length_100m is in 100 m units) selects 10K class. */
		if (a->length_km > 5 || a->length_100m > 50)
			ha->lr_distance = LR_DISTANCE_10K;
		else
			ha->lr_distance = LR_DISTANCE_5K;
	}

out:
	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
	    types[ha->flags.lr_detected],
	    ha->flags.lr_detected ? lengths[ha->lr_distance] :
	      lengths[LR_DISTANCE_UNKNOWN],
	    used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
	return ha->flags.lr_detected;
}
  3560. static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
  3561. {
  3562. u8 num_qps;
  3563. u16 limit;
  3564. struct qla_hw_data *ha = qpair->vha->hw;
  3565. num_qps = ha->num_qpairs + 1;
  3566. limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
  3567. qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
  3568. qpair->fwres.iocbs_limit = limit;
  3569. qpair->fwres.iocbs_qp_limit = limit / num_qps;
  3570. qpair->fwres.exch_total = ha->orig_fw_xcb_count;
  3571. qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
  3572. QLA_IOCB_PCT_LIMIT) / 100;
  3573. }
  3574. void qla_init_iocb_limit(scsi_qla_host_t *vha)
  3575. {
  3576. u8 i;
  3577. struct qla_hw_data *ha = vha->hw;
  3578. __qla_adjust_iocb_limit(ha->base_qpair);
  3579. ha->base_qpair->fwres.iocbs_used = 0;
  3580. ha->base_qpair->fwres.exch_used = 0;
  3581. for (i = 0; i < ha->max_qpairs; i++) {
  3582. if (ha->queue_pair_map[i]) {
  3583. __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
  3584. ha->queue_pair_map[i]->fwres.iocbs_used = 0;
  3585. ha->queue_pair_map[i]->fwres.exch_used = 0;
  3586. }
  3587. }
  3588. ha->fwres.iocb_total = ha->orig_fw_iocb_count;
  3589. ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
  3590. ha->fwres.exch_total = ha->orig_fw_xcb_count;
  3591. ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
  3592. atomic_set(&ha->fwres.iocb_used, 0);
  3593. atomic_set(&ha->fwres.exch_used, 0);
  3594. }
  3595. void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
  3596. {
  3597. u8 i;
  3598. struct qla_hw_data *ha = vha->hw;
  3599. __qla_adjust_iocb_limit(ha->base_qpair);
  3600. for (i = 0; i < ha->max_qpairs; i++) {
  3601. if (ha->queue_pair_map[i])
  3602. __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
  3603. }
  3604. }
/**
 * qla2x00_setup_chip() - Load and start RISC firmware.
 * @vha: HA context
 *
 * Loads the RISC firmware, verifies its checksum, starts execution, and
 * then retrieves firmware version/resource information, sizes the
 * outstanding-command array and allocates offload/dump buffers.  May
 * restart the firmware once if a long-range SFP is detected (BPM).
 *
 * Returns 0 on success.
 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;
	int done_once = 0;	/* limits the BPM-driven restart to one pass */

	if (IS_P3P_TYPE(ha)) {
		/*
		 * P3P (82xx): no checksum/execute step here; stop the
		 * firmware and jump straight to version/NPIV processing.
		 */
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
		rd_reg_word(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	qla81xx_mpi_sync(vha);

execute_fw_with_lr:
	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				/* Enable BPM support? */
				if (!done_once++ && qla24xx_detect_sfp(vha)) {
					ql_dbg(ql_dbg_init, vha, 0x00ca,
					    "Re-starting firmware -- BPM.\n");
					/* Best-effort - re-init. */
					ha->isp_ops->reset_chip(vha);
					ha->isp_ops->chip_diag(vha);
					goto execute_fw_with_lr;
				}

				if (IS_ZIO_THRESHOLD_CAPABLE(ha))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				/* Saved to detect a first-time (0) version. */
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
				    (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/*
					 * Fall back to the minimum fabric
					 * count when firmware reports zero
					 * or a count not aligned to
					 * MIN_MULTI_ID_FABRIC.
					 */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);
				qla_init_iocb_limit(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}

		/* Enable PUREX PASSTHRU */
		if (ql2xrdpenable || ha->flags.scm_supported_f ||
		    ha->flags.edif_enabled)
			qla25xx_set_els_cmds_supported(vha);
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
		rd_reg_word(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			/* Firmware reports the size in 4-byte words. */
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);
			/* 83xx can continue without FAC support. */
			if (IS_QLA83XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}

failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}
  3757. /**
  3758. * qla2x00_init_response_q_entries() - Initializes response queue entries.
  3759. * @rsp: response queue
  3760. *
  3761. * Beginning of request ring has initialization control block already built
  3762. * by nvram config routine.
  3763. *
  3764. * Returns 0 on success.
  3765. */
  3766. void
  3767. qla2x00_init_response_q_entries(struct rsp_que *rsp)
  3768. {
  3769. uint16_t cnt;
  3770. response_t *pkt;
  3771. rsp->ring_ptr = rsp->ring;
  3772. rsp->ring_index = 0;
  3773. rsp->status_srb = NULL;
  3774. pkt = rsp->ring_ptr;
  3775. for (cnt = 0; cnt < rsp->length; cnt++) {
  3776. pkt->signature = RESPONSE_PROCESSED;
  3777. pkt++;
  3778. }
  3779. }
/**
 * qla2x00_update_fw_options() - Read and process firmware options.
 * @vha: HA context
 *
 * Fetches the current firmware options, folds in NVRAM serial-link
 * settings (1G/2G swing, emphasis and sensitivity) plus FCP2/LED/P2P
 * bits, and writes the result back to the firmware.
 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* ISP2100/ISP2200 have none of the options handled below. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	/* BIT_2: NVRAM supplies swing/emphasis overrides -- apply them. */
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		/* Pack emphasis into bits 15:14 and swing into bits 10:8. */
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* rx_sens of 0 is remapped to 3. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings -- same layout, in fw_options[11]. */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
/*
 * qla24xx_update_fw_options() - FWI2 variant of firmware option update.
 * @vha: HA context
 *
 * Adjusts ABTS handling, FLOGI retry, ATIO queue routing, exchange
 * tracking, PUREX and BPM event bits, pushes the options to firmware,
 * then updates serdes parameters if NVRAM enables them.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	/* P3P (82xx) does not take this path. */
	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		/* Only when target or dual mode is active. */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~(BIT_4);

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;

		/*
		 * N2N: set Secure=1 for PLOGI ACC and
		 * fw shal not send PRLI after PLOGI Acc
		 */
		if (ha->flags.edif_enabled &&
		    DBELL_ACTIVE(vha)) {
			ha->fw_options[3] |= BIT_15;
			ha->flags.n2n_fw_acc_sec = 1;
		} else {
			ha->fw_options[3] &= ~BIT_15;
			ha->flags.n2n_fw_acc_sec = 0;
		}
	}

	if (ql2xrdpenable || ha->flags.scm_supported_f ||
	    ha->flags.edif_enabled)
		ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;

	/* Enable Async 8130/8131 events -- transceiver insertion/removal */
	if (IS_BPM_RANGE_CAPABLE(ha))
		ha->fw_options[3] |= BIT_10;

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Skip the mailbox call when no option word was set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
/*
 * qla2x00_config_rings() - Program ring parameters for legacy ISPs.
 * @vha: HA context
 *
 * Fills request/response queue lengths and DMA addresses into the init
 * control block and zeroes the hardware queue pointer registers.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	/* Reset the hardware in/out queue pointers. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
	rd_reg_word(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}
/*
 * qla24xx_config_rings() - Program ring parameters for FWI2 ISPs.
 * @vha: HA context
 *
 * Fills request/response/ATIO queue parameters into the 24xx init control
 * block, configures MQ/MSIX related option bits where applicable, resets
 * the queue pointer registers and lets target mode configure its rings.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	/* BIT_30|BIT_29: enable shadow in/out pointer registers. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/* Multiqueue-capable path. */
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			/* Vector 0 is reserved; entry 1 serves the base queue. */
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
	} else {
		wrt_reg_dword(&reg->isp24.req_q_in, 0);
		wrt_reg_dword(&reg->isp24.req_q_out, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
		wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
	}

	/* Target-mode ring setup. */
	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
	}

	/* PCI posting */
	rd_reg_word(&ioreg->hccr);
}
/**
 * qla2x00_init_rings() - Initializes firmware.
 * @vha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 *
 * Resets every request/response queue under the hardware lock, configures
 * the rings in hardware, then issues the init-firmware mailbox command.
 *
 * Returns 0 on success.
 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the ring entries. */
		req->out_ptr = (uint16_t *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Handle 0 is never used; start clearing at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the ring entries. */
		rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* QLAFX00 uses its own init-firmware path; skip option updates. */
	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	ql_dbg(ql_dbg_init, vha, 0x00d1,
	    "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
	    le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
	    le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
	    le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/* D-Port state is reported in firmware_options_1 BIT_7. */
		ha->flags.dport_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
		     BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
		/* Init_cb will be reused for other command(s).  Save a backup copy of port_name */
		memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
	}

	/* ELS pass through payload is limit by frame size. */
	if (ha->flags.edif_enabled)
		mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);

	QLA_FW_STARTED(ha);
	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		QLA_FW_STOPPED(ha);
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}
/**
 * qla2x00_fw_ready() - Waits for firmware ready.
 * @vha: HA context
 *
 * Polls the firmware state (qla2x00_get_firmware_state()) until it reports
 * FSTATE_READY, the overall wait deadline (@wtime) expires, or a loop-down
 * condition persists past the minimum-wait deadline (@mtime).
 *
 * Returns 0 on success.
 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	/* ISPFx00 adapters have their own ready-wait implementation. */
	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	do {
		/* Pre-fill with -1 so unreported sub-states are visible in logs. */
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				    (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh timing parameters now that fw is up. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
			    ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
  4245. /*
  4246. * qla2x00_configure_hba
  4247. * Setup adapter context.
  4248. *
  4249. * Input:
  4250. * ha = adapter state pointer.
  4251. *
  4252. * Returns:
  4253. * 0 = success
  4254. *
  4255. * Context:
  4256. * Kernel context.
  4257. */
  4258. static int
  4259. qla2x00_configure_hba(scsi_qla_host_t *vha)
  4260. {
  4261. int rval;
  4262. uint16_t loop_id;
  4263. uint16_t topo;
  4264. uint16_t sw_cap;
  4265. uint8_t al_pa;
  4266. uint8_t area;
  4267. uint8_t domain;
  4268. char connect_type[22];
  4269. struct qla_hw_data *ha = vha->hw;
  4270. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  4271. port_id_t id;
  4272. unsigned long flags;
  4273. /* Get host addresses. */
  4274. rval = qla2x00_get_adapter_id(vha,
  4275. &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
  4276. if (rval != QLA_SUCCESS) {
  4277. if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
  4278. IS_CNA_CAPABLE(ha) ||
  4279. (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
  4280. ql_dbg(ql_dbg_disc, vha, 0x2008,
  4281. "Loop is in a transition state.\n");
  4282. } else {
  4283. ql_log(ql_log_warn, vha, 0x2009,
  4284. "Unable to get host loop ID.\n");
  4285. if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
  4286. (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
  4287. ql_log(ql_log_warn, vha, 0x1151,
  4288. "Doing link init.\n");
  4289. if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
  4290. return rval;
  4291. }
  4292. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  4293. }
  4294. return (rval);
  4295. }
  4296. if (topo == 4) {
  4297. ql_log(ql_log_info, vha, 0x200a,
  4298. "Cannot get topology - retrying.\n");
  4299. return (QLA_FUNCTION_FAILED);
  4300. }
  4301. vha->loop_id = loop_id;
  4302. /* initialize */
  4303. ha->min_external_loopid = SNS_FIRST_LOOP_ID;
  4304. ha->operating_mode = LOOP;
  4305. switch (topo) {
  4306. case 0:
  4307. ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
  4308. ha->switch_cap = 0;
  4309. ha->current_topology = ISP_CFG_NL;
  4310. strcpy(connect_type, "(Loop)");
  4311. break;
  4312. case 1:
  4313. ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
  4314. ha->switch_cap = sw_cap;
  4315. ha->current_topology = ISP_CFG_FL;
  4316. strcpy(connect_type, "(FL_Port)");
  4317. break;
  4318. case 2:
  4319. ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
  4320. ha->switch_cap = 0;
  4321. ha->operating_mode = P2P;
  4322. ha->current_topology = ISP_CFG_N;
  4323. strcpy(connect_type, "(N_Port-to-N_Port)");
  4324. break;
  4325. case 3:
  4326. ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
  4327. ha->switch_cap = sw_cap;
  4328. ha->operating_mode = P2P;
  4329. ha->current_topology = ISP_CFG_F;
  4330. strcpy(connect_type, "(F_Port)");
  4331. break;
  4332. default:
  4333. ql_dbg(ql_dbg_disc, vha, 0x200f,
  4334. "HBA in unknown topology %x, using NL.\n", topo);
  4335. ha->switch_cap = 0;
  4336. ha->current_topology = ISP_CFG_NL;
  4337. strcpy(connect_type, "(Loop)");
  4338. break;
  4339. }
  4340. /* Save Host port and loop ID. */
  4341. /* byte order - Big Endian */
  4342. id.b.domain = domain;
  4343. id.b.area = area;
  4344. id.b.al_pa = al_pa;
  4345. id.b.rsvd_1 = 0;
  4346. spin_lock_irqsave(&ha->hardware_lock, flags);
  4347. if (vha->hw->flags.edif_enabled) {
  4348. if (topo != 2)
  4349. qlt_update_host_map(vha, id);
  4350. } else if (!(topo == 2 && ha->flags.n2n_bigger))
  4351. qlt_update_host_map(vha, id);
  4352. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  4353. if (!vha->flags.init_done)
  4354. ql_log(ql_log_info, vha, 0x2010,
  4355. "Topology - %s, Host Loop address 0x%x.\n",
  4356. connect_type, vha->loop_id);
  4357. return(rval);
  4358. }
  4359. inline void
  4360. qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
  4361. const char *def)
  4362. {
  4363. char *st, *en;
  4364. uint16_t index;
  4365. uint64_t zero[2] = { 0 };
  4366. struct qla_hw_data *ha = vha->hw;
  4367. int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
  4368. !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
  4369. if (len > sizeof(zero))
  4370. len = sizeof(zero);
  4371. if (memcmp(model, &zero, len) != 0) {
  4372. memcpy(ha->model_number, model, len);
  4373. st = en = ha->model_number;
  4374. en += len - 1;
  4375. while (en > st) {
  4376. if (*en != 0x20 && *en != 0x00)
  4377. break;
  4378. *en-- = '\0';
  4379. }
  4380. index = (ha->pdev->subsystem_device & 0xff);
  4381. if (use_tbl &&
  4382. ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
  4383. index < QLA_MODEL_NAMES)
  4384. strlcpy(ha->model_desc,
  4385. qla2x00_model_name[index * 2 + 1],
  4386. sizeof(ha->model_desc));
  4387. } else {
  4388. index = (ha->pdev->subsystem_device & 0xff);
  4389. if (use_tbl &&
  4390. ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
  4391. index < QLA_MODEL_NAMES) {
  4392. strlcpy(ha->model_number,
  4393. qla2x00_model_name[index * 2],
  4394. sizeof(ha->model_number));
  4395. strlcpy(ha->model_desc,
  4396. qla2x00_model_name[index * 2 + 1],
  4397. sizeof(ha->model_desc));
  4398. } else {
  4399. strlcpy(ha->model_number, def,
  4400. sizeof(ha->model_number));
  4401. }
  4402. }
  4403. if (IS_FWI2_CAPABLE(ha))
  4404. qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
  4405. sizeof(ha->model_desc));
  4406. }
  4407. /* On sparc systems, obtain port and node WWN from firmware
  4408. * properties.
  4409. */
  4410. static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
  4411. {
  4412. #ifdef CONFIG_SPARC
  4413. struct qla_hw_data *ha = vha->hw;
  4414. struct pci_dev *pdev = ha->pdev;
  4415. struct device_node *dp = pci_device_to_OF_node(pdev);
  4416. const u8 *val;
  4417. int len;
  4418. val = of_get_property(dp, "port-wwn", &len);
  4419. if (val && len >= WWN_SIZE)
  4420. memcpy(nv->port_name, val, WWN_SIZE);
  4421. val = of_get_property(dp, "node-wwn", &len);
  4422. if (val && len >= WWN_SIZE)
  4423. memcpy(nv->node_name, val, WWN_SIZE);
  4424. #endif
  4425. }
/*
 * NVRAM configuration for ISP 2xxx
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Output:
 *	initialization control block in response_ring
 *	host adapters parameters in host adapter block
 *
 * Returns:
 *	0 = success.
 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	/* Function 1 of dual-function parts uses the second NVRAM half. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/*
	 * Bad NVRAM data, set defaults parameters.
	 * Valid NVRAM must sum to zero, carry the "ISP " signature and a
	 * non-zero version; otherwise fall back to built-in defaults.
	 */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(2048);
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = cpu_to_le16(1024);
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = cpu_to_le16(1024);
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Default (invalid) WWPN prefix; overridden by OFW if present. */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;
		/* Non-zero rval marks that defaults were substituted. */
		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 * The ICB layout mirrors the NVRAM parameter block in two runs,
	 * separated by the request/response queue pointer fields.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);

	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	The driver waits for the link to come up after link down
	 *	before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
  4698. static void
  4699. qla2x00_rport_del(void *data)
  4700. {
  4701. fc_port_t *fcport = data;
  4702. struct fc_rport *rport;
  4703. unsigned long flags;
  4704. spin_lock_irqsave(fcport->vha->host->host_lock, flags);
  4705. rport = fcport->drport ? fcport->drport : fcport->rport;
  4706. fcport->drport = NULL;
  4707. spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
  4708. if (rport) {
  4709. ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
  4710. "%s %8phN. rport %p roles %x\n",
  4711. __func__, fcport->port_name, rport,
  4712. rport->roles);
  4713. fc_remote_port_delete(rport);
  4714. }
  4715. }
  4716. void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
  4717. {
  4718. int old_state;
  4719. old_state = atomic_read(&fcport->state);
  4720. atomic_set(&fcport->state, state);
  4721. /* Don't print state transitions during initial allocation of fcport */
  4722. if (old_state && old_state != state) {
  4723. ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
  4724. "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
  4725. fcport->port_name, port_state_str[old_state],
  4726. port_state_str[state], fcport->d_id.b.domain,
  4727. fcport->d_id.b.area, fcport->d_id.b.al_pa);
  4728. }
  4729. }
  4730. /**
  4731. * qla2x00_alloc_fcport() - Allocate a generic fcport.
  4732. * @vha: HA context
  4733. * @flags: allocation flags
  4734. *
  4735. * Returns a pointer to the allocated fcport, or NULL, if none available.
  4736. */
  4737. fc_port_t *
  4738. qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
  4739. {
  4740. fc_port_t *fcport;
  4741. fcport = kzalloc(sizeof(fc_port_t), flags);
  4742. if (!fcport)
  4743. return NULL;
  4744. fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
  4745. sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
  4746. flags);
  4747. if (!fcport->ct_desc.ct_sns) {
  4748. ql_log(ql_log_warn, vha, 0xd049,
  4749. "Failed to allocate ct_sns request.\n");
  4750. kfree(fcport);
  4751. return NULL;
  4752. }
  4753. /* Setup fcport template structure. */
  4754. fcport->vha = vha;
  4755. fcport->port_type = FCT_UNKNOWN;
  4756. fcport->loop_id = FC_NO_LOOP_ID;
  4757. qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
  4758. fcport->supported_classes = FC_COS_UNSPECIFIED;
  4759. fcport->fp_speed = PORT_SPEED_UNKNOWN;
  4760. fcport->disc_state = DSC_DELETED;
  4761. fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
  4762. fcport->deleted = QLA_SESS_DELETED;
  4763. fcport->login_retry = vha->hw->login_retry_count;
  4764. fcport->chip_reset = vha->hw->base_qpair->chip_reset;
  4765. fcport->logout_on_delete = 1;
  4766. fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
  4767. fcport->tgt_short_link_down_cnt = 0;
  4768. fcport->dev_loss_tmo = 0;
  4769. if (!fcport->ct_desc.ct_sns) {
  4770. ql_log(ql_log_warn, vha, 0xd049,
  4771. "Failed to allocate ct_sns request.\n");
  4772. kfree(fcport);
  4773. return NULL;
  4774. }
  4775. INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
  4776. INIT_WORK(&fcport->free_work, qlt_free_session_done);
  4777. INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
  4778. INIT_LIST_HEAD(&fcport->gnl_entry);
  4779. INIT_LIST_HEAD(&fcport->list);
  4780. INIT_LIST_HEAD(&fcport->sess_cmd_list);
  4781. spin_lock_init(&fcport->sess_cmd_lock);
  4782. spin_lock_init(&fcport->edif.sa_list_lock);
  4783. INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
  4784. INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
  4785. spin_lock_init(&fcport->edif.indx_list_lock);
  4786. INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
  4787. return fcport;
  4788. }
  4789. void
  4790. qla2x00_free_fcport(fc_port_t *fcport)
  4791. {
  4792. if (fcport->ct_desc.ct_sns) {
  4793. dma_free_coherent(&fcport->vha->hw->pdev->dev,
  4794. sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
  4795. fcport->ct_desc.ct_sns_dma);
  4796. fcport->ct_desc.ct_sns = NULL;
  4797. }
  4798. qla_edif_flush_sa_ctl_lists(fcport);
  4799. list_del(&fcport->list);
  4800. qla2x00_clear_loop_id(fcport);
  4801. qla_edif_list_del(fcport);
  4802. kfree(fcport);
  4803. }
  4804. static void qla_get_login_template(scsi_qla_host_t *vha)
  4805. {
  4806. struct qla_hw_data *ha = vha->hw;
  4807. int rval;
  4808. u32 *bp, sz;
  4809. __be32 *q;
  4810. memset(ha->init_cb, 0, ha->init_cb_size);
  4811. sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
  4812. rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
  4813. ha->init_cb, sz);
  4814. if (rval != QLA_SUCCESS) {
  4815. ql_dbg(ql_dbg_init, vha, 0x00d1,
  4816. "PLOGI ELS param read fail.\n");
  4817. return;
  4818. }
  4819. q = (__be32 *)&ha->plogi_els_payld.fl_csp;
  4820. bp = (uint32_t *)ha->init_cb;
  4821. cpu_to_be32_array(q, bp, sz / 4);
  4822. ha->flags.plogi_template_valid = 1;
  4823. }
/*
 * qla2x00_configure_loop
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Work on a local snapshot of the dpc flags from here on. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);
	qla_get_login_template(vha);

	/* Determine what we need to do (based on snapshot, per topology) */
	if ((ha->current_topology == ISP_CFG_FL ||
	    ha->current_topology == ISP_CFG_F) &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		/* Fabric: local-loop updates are handled as fabric rescans. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_NL ||
		   ha->current_topology == ISP_CFG_N) {
		/* Loop/N2N: only a local loop scan makes sense. */
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Coming back online: do both scans. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * use link up to wake up app to get ready for
			 * authentication.
			 */
			if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
				qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
				    ha->link_data_rate);

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally. local port wwpn %8phN id %06x)\n",
		    __func__, vha->port_name, vha->d_id.b24);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
  4942. static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
  4943. {
  4944. unsigned long flags;
  4945. fc_port_t *fcport;
  4946. ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);
  4947. if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
  4948. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  4949. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  4950. if (fcport->n2n_flag) {
  4951. qla24xx_fcport_handle_login(vha, fcport);
  4952. return QLA_SUCCESS;
  4953. }
  4954. }
  4955. spin_lock_irqsave(&vha->work_lock, flags);
  4956. vha->scan.scan_retry++;
  4957. spin_unlock_irqrestore(&vha->work_lock, flags);
  4958. if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
  4959. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  4960. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  4961. }
  4962. return QLA_FUNCTION_FAILED;
  4963. }
  4964. static void
  4965. qla_reinitialize_link(scsi_qla_host_t *vha)
  4966. {
  4967. int rval;
  4968. atomic_set(&vha->loop_state, LOOP_DOWN);
  4969. atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
  4970. rval = qla2x00_full_login_lip(vha);
  4971. if (rval == QLA_SUCCESS) {
  4972. ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
  4973. } else {
  4974. ql_dbg(ql_dbg_disc, vha, 0xd051,
  4975. "Link reinitialization failed (%d)\n", rval);
  4976. }
  4977. }
  4978. /*
  4979. * qla2x00_configure_local_loop
  4980. * Updates Fibre Channel Device Database with local loop devices.
  4981. *
  4982. * Input:
  4983. * ha = adapter block pointer.
  4984. *
  4985. * Returns:
  4986. * 0 = success.
  4987. */
  4988. static int
  4989. qla2x00_configure_local_loop(scsi_qla_host_t *vha)
  4990. {
  4991. int rval, rval2;
  4992. int found_devs;
  4993. int found;
  4994. fc_port_t *fcport, *new_fcport;
  4995. uint16_t index;
  4996. uint16_t entries;
  4997. struct gid_list_info *gid;
  4998. uint16_t loop_id;
  4999. uint8_t domain, area, al_pa;
  5000. struct qla_hw_data *ha = vha->hw;
  5001. unsigned long flags;
/* Initiate N2N login. */
  5003. if (N2N_TOPO(ha))
  5004. return qla2x00_configure_n2n_loop(vha);
  5005. found_devs = 0;
  5006. new_fcport = NULL;
  5007. entries = MAX_FIBRE_DEVICES_LOOP;
  5008. /* Get list of logged in devices. */
  5009. memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
  5010. rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
  5011. &entries);
  5012. if (rval != QLA_SUCCESS)
  5013. goto err;
  5014. ql_dbg(ql_dbg_disc, vha, 0x2011,
  5015. "Entries in ID list (%d).\n", entries);
  5016. ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
  5017. ha->gid_list, entries * sizeof(*ha->gid_list));
  5018. if (entries == 0) {
  5019. spin_lock_irqsave(&vha->work_lock, flags);
  5020. vha->scan.scan_retry++;
  5021. spin_unlock_irqrestore(&vha->work_lock, flags);
  5022. if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
  5023. u8 loop_map_entries = 0;
  5024. int rc;
  5025. rc = qla2x00_get_fcal_position_map(vha, NULL,
  5026. &loop_map_entries);
  5027. if (rc == QLA_SUCCESS && loop_map_entries > 1) {
  5028. /*
  5029. * There are devices that are still not logged
  5030. * in. Reinitialize to give them a chance.
  5031. */
  5032. qla_reinitialize_link(vha);
  5033. return QLA_FUNCTION_FAILED;
  5034. }
  5035. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  5036. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  5037. }
  5038. } else {
  5039. vha->scan.scan_retry = 0;
  5040. }
  5041. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  5042. fcport->scan_state = QLA_FCPORT_SCAN;
  5043. }
  5044. /* Allocate temporary fcport for any new fcports discovered. */
  5045. new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  5046. if (new_fcport == NULL) {
  5047. ql_log(ql_log_warn, vha, 0x2012,
  5048. "Memory allocation failed for fcport.\n");
  5049. rval = QLA_MEMORY_ALLOC_FAILED;
  5050. goto err;
  5051. }
  5052. new_fcport->flags &= ~FCF_FABRIC_DEVICE;
  5053. /* Add devices to port list. */
  5054. gid = ha->gid_list;
  5055. for (index = 0; index < entries; index++) {
  5056. domain = gid->domain;
  5057. area = gid->area;
  5058. al_pa = gid->al_pa;
  5059. if (IS_QLA2100(ha) || IS_QLA2200(ha))
  5060. loop_id = gid->loop_id_2100;
  5061. else
  5062. loop_id = le16_to_cpu(gid->loop_id);
  5063. gid = (void *)gid + ha->gid_list_info_size;
  5064. /* Bypass reserved domain fields. */
  5065. if ((domain & 0xf0) == 0xf0)
  5066. continue;
  5067. /* Bypass if not same domain and area of adapter. */
  5068. if (area && domain && ((area != vha->d_id.b.area) ||
  5069. (domain != vha->d_id.b.domain)) &&
  5070. (ha->current_topology == ISP_CFG_NL))
  5071. continue;
  5072. /* Bypass invalid local loop ID. */
  5073. if (loop_id > LAST_LOCAL_LOOP_ID)
  5074. continue;
  5075. memset(new_fcport->port_name, 0, WWN_SIZE);
  5076. /* Fill in member data. */
  5077. new_fcport->d_id.b.domain = domain;
  5078. new_fcport->d_id.b.area = area;
  5079. new_fcport->d_id.b.al_pa = al_pa;
  5080. new_fcport->loop_id = loop_id;
  5081. new_fcport->scan_state = QLA_FCPORT_FOUND;
  5082. rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
  5083. if (rval2 != QLA_SUCCESS) {
  5084. ql_dbg(ql_dbg_disc, vha, 0x2097,
  5085. "Failed to retrieve fcport information "
  5086. "-- get_port_database=%x, loop_id=0x%04x.\n",
  5087. rval2, new_fcport->loop_id);
  5088. /* Skip retry if N2N */
  5089. if (ha->current_topology != ISP_CFG_N) {
  5090. ql_dbg(ql_dbg_disc, vha, 0x2105,
  5091. "Scheduling resync.\n");
  5092. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  5093. continue;
  5094. }
  5095. }
  5096. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  5097. /* Check for matching device in port list. */
  5098. found = 0;
  5099. fcport = NULL;
  5100. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  5101. if (memcmp(new_fcport->port_name, fcport->port_name,
  5102. WWN_SIZE))
  5103. continue;
  5104. fcport->flags &= ~FCF_FABRIC_DEVICE;
  5105. fcport->loop_id = new_fcport->loop_id;
  5106. fcport->port_type = new_fcport->port_type;
  5107. fcport->d_id.b24 = new_fcport->d_id.b24;
  5108. memcpy(fcport->node_name, new_fcport->node_name,
  5109. WWN_SIZE);
  5110. fcport->scan_state = QLA_FCPORT_FOUND;
  5111. if (fcport->login_retry == 0) {
  5112. fcport->login_retry = vha->hw->login_retry_count;
  5113. ql_dbg(ql_dbg_disc, vha, 0x2135,
  5114. "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
  5115. fcport->port_name, fcport->loop_id,
  5116. fcport->login_retry);
  5117. }
  5118. found++;
  5119. break;
  5120. }
  5121. if (!found) {
  5122. /* New device, add to fcports list. */
  5123. list_add_tail(&new_fcport->list, &vha->vp_fcports);
  5124. /* Allocate a new replacement fcport. */
  5125. fcport = new_fcport;
  5126. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  5127. new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  5128. if (new_fcport == NULL) {
  5129. ql_log(ql_log_warn, vha, 0xd031,
  5130. "Failed to allocate memory for fcport.\n");
  5131. rval = QLA_MEMORY_ALLOC_FAILED;
  5132. goto err;
  5133. }
  5134. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  5135. new_fcport->flags &= ~FCF_FABRIC_DEVICE;
  5136. }
  5137. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  5138. /* Base iIDMA settings on HBA port speed. */
  5139. fcport->fp_speed = ha->link_data_rate;
  5140. found_devs++;
  5141. }
  5142. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  5143. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  5144. break;
  5145. if (fcport->scan_state == QLA_FCPORT_SCAN) {
  5146. if ((qla_dual_mode_enabled(vha) ||
  5147. qla_ini_mode_enabled(vha)) &&
  5148. atomic_read(&fcport->state) == FCS_ONLINE) {
  5149. qla2x00_mark_device_lost(vha, fcport,
  5150. ql2xplogiabsentdevice);
  5151. if (fcport->loop_id != FC_NO_LOOP_ID &&
  5152. (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
  5153. fcport->port_type != FCT_INITIATOR &&
  5154. fcport->port_type != FCT_BROADCAST) {
  5155. ql_dbg(ql_dbg_disc, vha, 0x20f0,
  5156. "%s %d %8phC post del sess\n",
  5157. __func__, __LINE__,
  5158. fcport->port_name);
  5159. qlt_schedule_sess_for_deletion(fcport);
  5160. continue;
  5161. }
  5162. }
  5163. }
  5164. if (fcport->scan_state == QLA_FCPORT_FOUND)
  5165. qla24xx_fcport_handle_login(vha, fcport);
  5166. }
  5167. qla2x00_free_fcport(new_fcport);
  5168. return rval;
  5169. err:
  5170. ql_dbg(ql_dbg_disc, vha, 0x2098,
  5171. "Configure local loop error exit: rval=%x.\n", rval);
  5172. return rval;
  5173. }
  5174. static void
  5175. qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
  5176. {
  5177. int rval;
  5178. uint16_t mb[MAILBOX_REGISTER_COUNT];
  5179. struct qla_hw_data *ha = vha->hw;
  5180. if (!IS_IIDMA_CAPABLE(ha))
  5181. return;
  5182. if (atomic_read(&fcport->state) != FCS_ONLINE)
  5183. return;
  5184. if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
  5185. fcport->fp_speed > ha->link_data_rate ||
  5186. !ha->flags.gpsc_supported)
  5187. return;
  5188. rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
  5189. mb);
  5190. if (rval != QLA_SUCCESS) {
  5191. ql_dbg(ql_dbg_disc, vha, 0x2004,
  5192. "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
  5193. fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
  5194. } else {
  5195. ql_dbg(ql_dbg_disc, vha, 0x2005,
  5196. "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
  5197. qla2x00_get_link_speed_str(ha, fcport->fp_speed),
  5198. fcport->fp_speed, fcport->port_name);
  5199. }
  5200. }
/*
 * Worker helper: apply iIDMA speed adjustment and refresh the FCP priority
 * settings for @fcport.  Runs the same pair of updates performed inline by
 * qla2x00_update_fcport().
 */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}
  5206. int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
  5207. {
  5208. struct qla_work_evt *e;
  5209. e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
  5210. if (!e)
  5211. return QLA_FUNCTION_FAILED;
  5212. e->u.fcport.fcport = fcport;
  5213. return qla2x00_post_work(vha, e);
  5214. }
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Port already online implies it is already registered. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/*
	 * Publish the fcport back-pointer in the rport's private data under
	 * host_lock so transport callbacks see a consistent value.
	 */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	fcport->dev_loss_tmo = rport->dev_loss_tmo;

	rport->supported_classes = fcport->supported_classes;

	/* Translate driver port type bits into FC transport roles. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	fc_remote_port_rolechg(rport, rport_ids.roles);

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
	    __func__, fcport->port_name, vha->host_no,
	    rport->scsi_target_id, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
}
/*
 * qla2x00_update_fcport
 *	Updates device on list.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	0 - Success
 *	BIT_0 - error
 *
 * Context:
 *	Kernel context.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	unsigned long flags;

	/* Never register switch-reserved addresses. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

	/* Clear the deleted flag under work_lock for consistent readers. */
	spin_lock_irqsave(&vha->work_lock, flags);
	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	/* Explicit logout on delete is skipped in loop (NL) topology. */
	if (vha->hw->current_topology == ISP_CFG_NL)
		fcport->logout_on_delete = 0;
	else
		fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
		fcport->tgt_short_link_down_cnt++;
		fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
	}

	/* Point-to-point topologies keep the firmware N-port handle. */
	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	qla2x00_iidma_fcport(vha, fcport);

	qla2x00_dfs_create_rport(vha, fcport);

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the FC transport and/or target core per active mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	if (NVME_TARGET(vha->hw, fcport))
		qla_nvme_register_remote(vha, fcport);

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	/*
	 * With GPSC support, refresh the port-name-by-FC-ID mapping after an
	 * ID change, otherwise query the current port speed.
	 */
	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
}
  5347. void qla_register_fcport_fn(struct work_struct *work)
  5348. {
  5349. fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
  5350. u32 rscn_gen = fcport->rscn_gen;
  5351. u16 data[2];
  5352. if (IS_SW_RESV_ADDR(fcport->d_id))
  5353. return;
  5354. qla2x00_update_fcport(fcport->vha, fcport);
  5355. ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
  5356. "%s rscn gen %d/%d next DS %d\n", __func__,
  5357. rscn_gen, fcport->rscn_gen, fcport->next_disc_state);
  5358. if (rscn_gen != fcport->rscn_gen) {
  5359. /* RSCN(s) came in while registration */
  5360. switch (fcport->next_disc_state) {
  5361. case DSC_DELETE_PEND:
  5362. qlt_schedule_sess_for_deletion(fcport);
  5363. break;
  5364. case DSC_ADISC:
  5365. data[0] = data[1] = 0;
  5366. qla2x00_post_async_adisc_work(fcport->vha, fcport,
  5367. data);
  5368. break;
  5369. default:
  5370. break;
  5371. }
  5372. }
  5373. }
/*
 * qla2x00_configure_fabric
 *	Setup SNS devices with loop ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint16_t loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No fabric present is not an error -- just no switch. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, vha, 0x20ff,
		    "Failed to get Fabric Port Name\n");

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
			    "Failed to enable receiving of RSCN requests: 0x%x.\n",
			    rval);
	}

	/* do { } while (0) gives the body single-exit "break" semantics. */
	do {
		qla2x00_mgmt_svr_login(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Register FC-4 type/features and names with the name server. */
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}


		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		if (USE_ASYNC_SCAN(ha)) {
			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
			    NULL);
			if (rval)
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		} else {
			list_for_each_entry(fcport, &vha->vp_fcports, list)
				fcport->scan_state = QLA_FCPORT_SCAN;
			rval = qla2x00_find_all_fabric_devs(vha);
		}
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}
/*
 * qla2x00_find_all_fabric_devs
 *
 * Input:
 *	ha = adapter block pointer.
 *	dev = database device entry pointer.
 *
 * Returns:
 *	0 = success.
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	fc_port_t *fcport, *new_fcport;
	int found;

	sw_info_t *swl;
	int swl_idx;
	int first_dev, last_dev;
	port_id_t wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		/*
		 * Populate swl via the name-server queries; any failure
		 * drops back to the per-device GA_NXT path (swl = NULL).
		 */
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);

	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		/* Loop transition while scanning: abandon and resync. */
		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				/* Take the next entry from the GID_PT list. */
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				/* rsvd_1 set marks the final GID_PT entry. */
				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
		    new_fcport->fc4_type != 0))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			if (fcport->login_retry == 0)
				fcport->login_retry =
					vha->hw->login_retry_count;
			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found && NVME_TARGET(vha->hw, fcport)) {
			/*
			 * NVMe port pending delete: pull it back into GNL
			 * state so the login path re-drives discovery.
			 */
			if (fcport->disc_state == DSC_DELETE_PEND) {
				qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
				vha->fcport_count--;
				fcport->login_succ = 0;
			}
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);


		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	/* The spare fcport from the last iteration was never list-linked. */
	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);
					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND &&
		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
			qla24xx_fcport_handle_login(vha, fcport);
	}
	return (rval);
}
  5775. /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
  5776. int
  5777. qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
  5778. {
  5779. int loop_id = FC_NO_LOOP_ID;
  5780. int lid = NPH_MGMT_SERVER - vha->vp_idx;
  5781. unsigned long flags;
  5782. struct qla_hw_data *ha = vha->hw;
  5783. if (vha->vp_idx == 0) {
  5784. set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
  5785. return NPH_MGMT_SERVER;
  5786. }
  5787. /* pick id from high and work down to low */
  5788. spin_lock_irqsave(&ha->vport_slock, flags);
  5789. for (; lid > 0; lid--) {
  5790. if (!test_bit(lid, vha->hw->loop_id_map)) {
  5791. set_bit(lid, vha->hw->loop_id_map);
  5792. loop_id = lid;
  5793. break;
  5794. }
  5795. }
  5796. spin_unlock_irqrestore(&ha->vport_slock, flags);
  5797. return loop_id;
  5798. }
/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/*
	 * Retry loop: each pass issues one fabric login; the mailbox status
	 * in mb[0] decides whether to retry with a different loop id, accept
	 * the login, or give up.
	 */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is save
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] flags classify the remote device type. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* Supported class-of-service bits from mb[10]. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
  5930. /*
  5931. * qla2x00_local_device_login
  5932. * Issue local device login command.
  5933. *
  5934. * Input:
  5935. * ha = adapter block pointer.
  5936. * loop_id = loop id of device to login to.
  5937. *
  5938. * Returns (Where's the #define!!!!):
  5939. * 0 - Login successfully
  5940. * 1 - Login failed
  5941. * 3 - Fatal error
  5942. */
  5943. int
  5944. qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
  5945. {
  5946. int rval;
  5947. uint16_t mb[MAILBOX_REGISTER_COUNT];
  5948. memset(mb, 0, sizeof(mb));
  5949. rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
  5950. if (rval == QLA_SUCCESS) {
  5951. /* Interrogate mailbox registers for any errors */
  5952. if (mb[0] == MBS_COMMAND_ERROR)
  5953. rval = 1;
  5954. else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
  5955. /* device not in PCB table */
  5956. rval = 3;
  5957. }
  5958. return (rval);
  5959. }
/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, vha->hw->base_qpair,
					    0, 0, MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/*
				 * Re-run the scan while LOOP_RESYNC_NEEDED
				 * keeps getting re-set (i.e. more RSCNs
				 * arrived), up to 256 iterations, unless the
				 * loop went down or an ISP abort is pending.
				 */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	/* A pending ISP abort overrides any resync result. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}
  6010. /*
  6011. * qla2x00_perform_loop_resync
  6012. * Description: This function will set the appropriate flags and call
  6013. * qla2x00_loop_resync. If successful loop will be resynced
  6014. * Arguments : scsi_qla_host_t pointer
  6015. * returm : Success or Failure
  6016. */
  6017. int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
  6018. {
  6019. int32_t rval = 0;
  6020. if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
  6021. /*Configure the flags so that resync happens properly*/
  6022. atomic_set(&ha->loop_down_timer, 0);
  6023. if (!(ha->device_flags & DFLG_NO_CABLE)) {
  6024. atomic_set(&ha->loop_state, LOOP_UP);
  6025. set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
  6026. set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
  6027. set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
  6028. rval = qla2x00_loop_resync(ha);
  6029. } else
  6030. atomic_set(&ha->loop_state, LOOP_DEAD);
  6031. clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
  6032. }
  6033. return rval;
  6034. }
/*
 * qla2x00_update_fcports
 *	Walk all vports of this HBA and complete the deferred removal of
 *	rport references (fcports whose ->drport was queued for deletion).
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha, *tvp;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
		/* Pin the vport so it stays valid while the lock is dropped. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/*
				 * qla2x00_rport_del() is called without
				 * vport_slock held; drop it around the call
				 * and re-acquire afterwards.
				 */
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);
				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence, drv_presence_mask;
	uint32_t dev_part_info1, dev_part_info2, class_type;
	uint32_t class_type_mask = 0x3;
	uint16_t fcoe_other_function = 0xffff, i;

	/* Read driver-presence and device-partition info registers. */
	if (IS_QLA8044(ha)) {
		drv_presence = qla8044_rd_direct(vha,
		    QLA8044_CRB_DRV_ACTIVE_INDEX);
		dev_part_info1 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO_INDEX);
		dev_part_info2 = qla8044_rd_direct(vha,
		    QLA8044_CRB_DEV_PART_INFO2);
	} else {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
	}

	/*
	 * Each partition-info register packs one class type per function in
	 * 4-bit nibbles (low 2 bits used): functions 0-7 in info1, 8-15 in
	 * info2.  Find the first *other* function whose class is FCoE.
	 */
	for (i = 0; i < 8; i++) {
		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
		    (i != ha->portnum)) {
			fcoe_other_function = i;
			break;
		}
	}
	if (fcoe_other_function == 0xffff) {
		/* Not found among functions 0-7; scan 8-15. */
		for (i = 0; i < 8; i++) {
			class_type = ((dev_part_info2 >> (i * 4)) &
			    class_type_mask);
			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
			    ((i + 8) != ha->portnum)) {
				fcoe_other_function = i + 8;
				break;
			}
		}
	}
	/*
	 * Prepare drv-presence mask based on fcoe functions present.
	 * However consider only valid physical fcoe function numbers (0-15).
	 */
	drv_presence_mask = ~((1 << (ha->portnum)) |
			((fcoe_other_function == 0xffff) ?
			 0 : (1 << (fcoe_other_function))));

	/* We are the reset owner iff:
	 *  - No other protocol drivers present.
	 *  - This is the lowest among fcoe functions. */
	if (!(drv_presence & drv_presence_mask) &&
	    (ha->portnum < fcoe_other_function)) {
		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
		    "This host is Reset owner.\n");
		ha->flags.nic_core_reset_owner = 1;
	}
}
  6116. static int
  6117. __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
  6118. {
  6119. int rval = QLA_SUCCESS;
  6120. struct qla_hw_data *ha = vha->hw;
  6121. uint32_t drv_ack;
  6122. rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
  6123. if (rval == QLA_SUCCESS) {
  6124. drv_ack |= (1 << ha->portnum);
  6125. rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
  6126. }
  6127. return rval;
  6128. }
  6129. static int
  6130. __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
  6131. {
  6132. int rval = QLA_SUCCESS;
  6133. struct qla_hw_data *ha = vha->hw;
  6134. uint32_t drv_ack;
  6135. rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
  6136. if (rval == QLA_SUCCESS) {
  6137. drv_ack &= ~(1 << ha->portnum);
  6138. rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
  6139. }
  6140. return rval;
  6141. }
/* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_audit_reg = 0, duration_secs = 0;

	switch (audit_type) {
	case IDC_AUDIT_TIMESTAMP:
		/*
		 * Record reset-start time and publish it to the IDC audit
		 * register: reg = portnum | (type << 7) | (value << 8).
		 */
		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	case IDC_AUDIT_COMPLETION:
		/*
		 * Publish elapsed duration since the timestamp audit.
		 * NOTE(review): idc_audit_ts is stored in seconds above but
		 * is passed to jiffies_to_msecs() here as if it held
		 * jiffies -- confirm the intended units.
		 */
		duration_secs = ((jiffies_to_msecs(jiffies) -
		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
		idc_audit_reg = (ha->portnum) |
		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
		break;

	default:
		ql_log(ql_log_warn, vha, 0xb078,
		    "Invalid audit type specified.\n");
		break;
	}
}
/* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	/* Bail out if NIC-core reset has been administratively disabled. */
	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		/* Not the owner (or not READY): wait for the owner to move
		 * the IDC state machine forward. */
		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
		    qdev_state(dev_state));

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			/* Drop the IDC lock while sleeping so the owner can
			 * advance the state, then re-read it. */
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);

			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
  6204. int
  6205. __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
  6206. {
  6207. return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
  6208. }
  6209. int
  6210. __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
  6211. {
  6212. return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
  6213. }
  6214. static int
  6215. qla83xx_check_driver_presence(scsi_qla_host_t *vha)
  6216. {
  6217. uint32_t drv_presence = 0;
  6218. struct qla_hw_data *ha = vha->hw;
  6219. qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
  6220. if (drv_presence & (1 << ha->portnum))
  6221. return QLA_SUCCESS;
  6222. else
  6223. return QLA_TEST_FAILED;
  6224. }
/*
 * qla83xx_nic_core_reset
 *	Drive an 83xx NIC-core reset through the IDC state machine: claim
 *	reset ownership if eligible, flag NEED-RESET, then run the IDC
 *	state handler until the reset completes (or fails).
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered  %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Determine whether we are the reset owner before initiating. */
	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		/* Ack handled either way. */
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	/* Single unlock point for all paths past the lock acquisition. */
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}
/*
 * qla2xxx_mctp_dump
 *	Capture an MCTP dump into a (lazily allocated, cached) DMA buffer,
 *	then restart the NIC firmware from function 0 if no other reset
 *	handler is active.
 *
 * Returns:
 *	QLA_SUCCESS on a successful capture, error status otherwise.
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Allocate the dump buffer once; it is kept for reuse. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR	0x00000000
	/* Firmware transfers the dump in 32-bit words. */
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only function 0 restarts the NIC firmware, and only when no
	 * other NIC-core reset handler is already running. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;
}
/*
 * qla2x00_quiesce_io
 * Description: This function will block the new I/Os
 *              Its not aborting any I/Os as context
 *              is not destroyed during quiescence
 * Arguments: scsi_qla_host_t
 * return : void
 */
void
qla2x00_quiesce_io(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	ql_dbg(ql_dbg_dpc, vha, 0x401d,
	    "Quiescing I/O - ha=%p.\n", ha);

	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		/* Take the loop down and mark devices lost on the base host
		 * and on every vport. */
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			/* Pin the vport, drop the lock around the (possibly
			 * lengthy) mark-lost call, then re-acquire. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		/* Loop already down -- just (re)arm the loop-down timer. */
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}
	/* Wait for pending cmds to complete */
	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
		     != QLA_SUCCESS);
}
/*
 * qla2x00_abort_isp_cleanup
 *	Tear down driver/firmware state ahead of an ISP abort: reset the
 *	chip (except P3P/82xx), bump the chip_reset generation on all
 *	queue pairs, drain pending mailbox commands, mark all devices
 *	lost on every vport, clear async login state, and finally abort
 *	all outstanding commands.
 */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	ha->flags.purge_mbox = 1;
	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;

	/* Bump the reset generation and propagate it to every queue pair
	 * so stale commands can be recognized. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
	ha->base_qpair->prev_completion_cnt = 0;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
			ha->queue_pair_map[i]->cmd_cnt =
			    ha->queue_pair_map[i]->cmd_completion_cnt = 0;
			ha->base_qpair->prev_completion_cnt = 0;
		}
	}

	/* purge MBox commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Poll (up to ~1s) for in-flight mailbox commands to drain. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			/* Pin vport, drop the lock around the call, retake. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		fcport->scan_state = 0;
	}
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	/* Make sure for ISP 82XX IO DMA is complete */
	if (IS_P3P_TYPE(ha)) {
		qla82xx_chip_reset_cleanup(vha);
		ql_log(ql_log_info, vha, 0x00b4,
		    "Done chip reset cleanup.\n");

		/* Done waiting for pending commands. Reset online flag */
		vha->flags.online = 0;
	}

	/* Requeue all commands in outstanding command list. */
	qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	/* memory barrier */
	wmb();
}
/*
 * qla2x00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t status = 0;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	struct req_que *req = ha->req_q_map[0];
	unsigned long flags;

	if (vha->flags.online) {
		qla2x00_abort_isp_cleanup(vha);

		/* Flag a D-Port diagnostic chip reset in progress. */
		vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
		vha->dport_status &=  ~DPORT_DIAG_IN_PROGRESS;

		if (vha->hw->flags.port_isolated)
			return status;

		if (qla2x00_isp_reg_stat(ha)) {
			ql_log(ql_log_info, vha, 0x803f,
			    "ISP Abort - ISP reg disconnect, exiting.\n");
			return status;
		}

		/* Abort-to-ROM requested: stop at ROM firmware, do not
		 * reload/restart; report success immediately. */
		if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
			ha->flags.chip_reset_done = 1;
			vha->flags.online = 1;
			status = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			return status;
		}

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb073,
				    "Error while clearing DRV-Presence.\n");
		}

		/* PCI channel permanently gone: nothing more to do. */
		if (unlikely(pci_channel_offline(ha->pdev) &&
		    ha->flags.pci_channel_io_perm_failure)) {
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			status = 0;
			return status;
		}

		/* Skip the restart when the initiator role is not in play
		 * for the current mode configuration. */
		switch (vha->qlini_mode) {
		case QLA2XXX_INI_MODE_DISABLED:
			if (!qla_tgt_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_DUAL:
			if (!qla_dual_mode_enabled(vha) &&
			    !qla_ini_mode_enabled(vha))
				return 0;
			break;
		case QLA2XXX_INI_MODE_ENABLED:
		default:
			break;
		}

		ha->isp_ops->get_flash_version(vha, req->ring);

		/* Re-check register accessibility around NVRAM config. */
		if (qla2x00_isp_reg_stat(ha)) {
			ql_log(ql_log_info, vha, 0x803f,
			    "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
			return status;
		}
		ha->isp_ops->nvram_config(vha);

		if (qla2x00_isp_reg_stat(ha)) {
			ql_log(ql_log_info, vha, 0x803f,
			    "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
			return status;
		}
		if (!qla2x00_restart_isp(vha)) {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			if (!atomic_read(&vha->loop_down_timer)) {
				/*
				 * Issue marker command only when we are going
				 * to start the I/O .
				 */
				vha->marker_needed = 1;
			}

			vha->flags.online = 1;

			ha->isp_ops->enable_intrs(ha);

			ha->isp_abort_cnt = 0;
			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
				qla2x00_get_fw_version(vha);

			/* Re-arm FCE/EFT tracing if buffers exist. */
			if (ha->fce) {
				ha->flags.fce_enabled = 1;
				memset(ha->fce, 0,
				    fce_calc_size(ha->fce_bufs));
				rval = qla2x00_enable_fce_trace(vha,
				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
				    &ha->fce_bufs);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8033,
					    "Unable to reinitialize FCE "
					    "(%d).\n", rval);
					ha->flags.fce_enabled = 0;
				}
			}

			if (ha->eft) {
				memset(ha->eft, 0, EFT_SIZE);
				rval = qla2x00_enable_eft_trace(vha,
				    ha->eft_dma, EFT_NUM_BUFFERS);
				if (rval) {
					ql_log(ql_log_warn, vha, 0x8034,
					    "Unable to reinitialize EFT "
					    "(%d).\n", rval);
				}
			}
		} else {	/* failed the ISP abort */
			vha->flags.online = 1;
			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (ha->isp_abort_cnt == 0) {
					/* Retries exhausted: disable the
					 * board for good. */
					ql_log(ql_log_fatal, vha, 0x8035,
					    "ISP error recover failed - "
					    "board disabled.\n");
					/*
					 * The next call disables the board
					 * completely.
					 */
					qla2x00_abort_isp_cleanup(vha);
					vha->flags.online = 0;
					clear_bit(ISP_ABORT_RETRY,
					    &vha->dpc_flags);
					status = 0;
				} else { /* schedule another ISP abort */
					ha->isp_abort_cnt--;
					ql_dbg(ql_dbg_taskm, vha, 0x8020,
					    "ISP abort - retry remaining %d.\n",
					    ha->isp_abort_cnt);
					status = 1;
				}
			} else {
				/* First failure: start the retry budget. */
				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
				ql_dbg(ql_dbg_taskm, vha, 0x8021,
				    "ISP error recovery - retrying (%d) "
				    "more times.\n", ha->isp_abort_cnt);
				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
				status = 1;
			}
		}

	}

	if (vha->hw->flags.port_isolated) {
		qla2x00_abort_isp_cleanup(vha);
		return status;
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
		qla2x00_configure_hba(vha);
		/* Propagate the abort to every vport (vref-pinned, lock
		 * dropped around the per-vport call). */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
			    "Setting back fcoe driver presence.\n");
			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, vha, 0xb074,
				    "Error while setting DRV-Presence.\n");
		}
	} else {
		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
		       __func__);
	}

	return(status);
}
  6633. /*
  6634. * qla2x00_restart_isp
  6635. * restarts the ISP after a reset
  6636. *
  6637. * Input:
  6638. * ha = adapter block pointer.
  6639. *
  6640. * Returns:
  6641. * 0 = success
  6642. */
  6643. static int
  6644. qla2x00_restart_isp(scsi_qla_host_t *vha)
  6645. {
  6646. int status;
  6647. struct qla_hw_data *ha = vha->hw;
  6648. /* If firmware needs to be loaded */
  6649. if (qla2x00_isp_firmware(vha)) {
  6650. vha->flags.online = 0;
  6651. status = ha->isp_ops->chip_diag(vha);
  6652. if (status)
  6653. return status;
  6654. status = qla2x00_setup_chip(vha);
  6655. if (status)
  6656. return status;
  6657. }
  6658. status = qla2x00_init_rings(vha);
  6659. if (status)
  6660. return status;
  6661. clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
  6662. ha->flags.chip_reset_done = 1;
  6663. /* Initialize the queues in use */
  6664. qla25xx_init_queues(ha);
  6665. status = qla2x00_fw_ready(vha);
  6666. if (status) {
  6667. /* if no cable then assume it's good */
  6668. return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
  6669. }
  6670. /* Issue a marker after FW becomes ready. */
  6671. qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
  6672. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  6673. return 0;
  6674. }
  6675. static int
  6676. qla25xx_init_queues(struct qla_hw_data *ha)
  6677. {
  6678. struct rsp_que *rsp = NULL;
  6679. struct req_que *req = NULL;
  6680. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  6681. int ret = -1;
  6682. int i;
  6683. for (i = 1; i < ha->max_rsp_queues; i++) {
  6684. rsp = ha->rsp_q_map[i];
  6685. if (rsp && test_bit(i, ha->rsp_qid_map)) {
  6686. rsp->options &= ~BIT_0;
  6687. ret = qla25xx_init_rsp_que(base_vha, rsp);
  6688. if (ret != QLA_SUCCESS)
  6689. ql_dbg(ql_dbg_init, base_vha, 0x00ff,
  6690. "%s Rsp que: %d init failed.\n",
  6691. __func__, rsp->id);
  6692. else
  6693. ql_dbg(ql_dbg_init, base_vha, 0x0100,
  6694. "%s Rsp que: %d inited.\n",
  6695. __func__, rsp->id);
  6696. }
  6697. }
  6698. for (i = 1; i < ha->max_req_queues; i++) {
  6699. req = ha->req_q_map[i];
  6700. if (req && test_bit(i, ha->req_qid_map)) {
  6701. /* Clear outstanding commands array. */
  6702. req->options &= ~BIT_0;
  6703. ret = qla25xx_init_req_que(base_vha, req);
  6704. if (ret != QLA_SUCCESS)
  6705. ql_dbg(ql_dbg_init, base_vha, 0x0101,
  6706. "%s Req que: %d init failed.\n",
  6707. __func__, req->id);
  6708. else
  6709. ql_dbg(ql_dbg_init, base_vha, 0x0102,
  6710. "%s Req que: %d inited.\n",
  6711. __func__, req->id);
  6712. }
  6713. }
  6714. return ret;
  6715. }
/*
 * qla2x00_reset_adapter
 *	Reset adapter.
 *
 * Input:
 *	ha = adapter block pointer.
 */
int
qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Take the port offline and quiesce interrupts before touching
	 * the RISC. */
	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Reset then release the RISC; each write is read back to flush
	 * PCI posting before the next step. */
	wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */
	wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
	rd_reg_word(&reg->hccr);			/* PCI Posting. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/*
 * qla24xx_reset_adapter
 *	Reset a 24xx-family adapter: set RISC reset, then release with
 *	pause.  No-op (success) on P3P parts.
 */
int
qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	vha->flags.online = 0;
	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Each write is read back to flush PCI posting. */
	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
	rd_reg_dword(&reg->hccr);
	wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	rd_reg_dword(&reg->hccr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Polling-mode hardware needs interrupts re-enabled here. */
	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return QLA_SUCCESS;
}
/* On sparc systems, obtain port and node WWN from firmware
 * properties.
 */
static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
	struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
	struct qla_hw_data *ha = vha->hw;
	struct pci_dev *pdev = ha->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const u8 *val;
	int len;

	/* Copy OpenFirmware "port-wwn"/"node-wwn" properties into the
	 * NVRAM image, only when present and at least WWN_SIZE bytes. */
	val = of_get_property(dp, "port-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->port_name, val, WWN_SIZE);

	val = of_get_property(dp, "node-wwn", &len);
	if (val && len >= WWN_SIZE)
		memcpy(nv->node_name, val, WWN_SIZE);
#endif
}
/*
 * qla24xx_nvram_config() - Read and validate ISP24xx NVRAM, then build the
 * firmware initialization control block (ICB) and driver parameters from it.
 *
 * @vha: adapter host structure.
 *
 * Returns QLA_SUCCESS (0) when the NVRAM contents were usable; returns 1
 * when the NVRAM was invalid and hard-coded defaults were substituted
 * (a warning is logged before returning in that case).
 */
int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_24xx *icb;
	struct nvram_24xx *nv;
	__le32 *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_24xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address (per-function regions). */
	if (ha->port_no == 0) {
		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	} else {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/*
	 * Get NVRAM data into cache and calculate checksum.  A valid image
	 * sums (as little-endian dwords) to zero.
	 */
	dptr = (__force __le32 *)nv;
	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
	    "Contents of NVRAM\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x006b,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
		ql_log(ql_log_warn, vha, 0x006c,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.  The WWNs below
		 * are placeholders; qla24xx_nvram_wwn_from_ofw() may replace
		 * them from OpenFirmware properties.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->hard_address = cpu_to_le16(124);
		nv->port_name[0] = 0x21;
		/* Make the default WWPN unique per PCI function. */
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		qla24xx_nvram_wwn_from_ofw(vha, nv);
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(30);

		/* Signal "defaults used" to the caller. */
		rval = 1;
	}

	if (qla_tgt_mode_enabled(vha)) {
		/* Don't enable full login after initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Don't enable LIP full login for initiator */
		nv->host_p &= cpu_to_le32(~BIT_10);
	}

	qlt_24xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment (nv->version .. response_q_inpointer). */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;
	icb->link_down_on_nos = nv->link_down_on_nos;

	/* Copy 2nd segment (interrupt_delay_timer .. reserved_3). */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_3 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLA2462");

	qlt_24xx_config_nvram_stage2(vha, icb);

	if (nv->host_p & cpu_to_le32(BIT_15)) {
		/* Use alternate WWN? */
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options24));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
  7003. static void
  7004. qla27xx_print_image(struct scsi_qla_host *vha, char *name,
  7005. struct qla27xx_image_status *image_status)
  7006. {
  7007. ql_dbg(ql_dbg_init, vha, 0x018b,
  7008. "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
  7009. name, "status",
  7010. image_status->image_status_mask,
  7011. le16_to_cpu(image_status->generation),
  7012. image_status->ver_major,
  7013. image_status->ver_minor,
  7014. image_status->bitmap,
  7015. le32_to_cpu(image_status->checksum),
  7016. le32_to_cpu(image_status->signature));
  7017. }
  7018. static bool
  7019. qla28xx_check_aux_image_status_signature(
  7020. struct qla27xx_image_status *image_status)
  7021. {
  7022. ulong signature = le32_to_cpu(image_status->signature);
  7023. return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
  7024. }
  7025. static bool
  7026. qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
  7027. {
  7028. ulong signature = le32_to_cpu(image_status->signature);
  7029. return
  7030. signature != QLA27XX_IMG_STATUS_SIGN &&
  7031. signature != QLA28XX_IMG_STATUS_SIGN;
  7032. }
  7033. static ulong
  7034. qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
  7035. {
  7036. __le32 *p = (__force __le32 *)image_status;
  7037. uint n = sizeof(*image_status) / sizeof(*p);
  7038. uint32_t sum = 0;
  7039. for ( ; n--; p++)
  7040. sum += le32_to_cpup(p);
  7041. return sum;
  7042. }
  7043. static inline uint
  7044. qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
  7045. {
  7046. return aux->bitmap & bitmask ?
  7047. QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
  7048. }
  7049. static void
  7050. qla28xx_component_status(
  7051. struct active_regions *active_regions, struct qla27xx_image_status *aux)
  7052. {
  7053. active_regions->aux.board_config =
  7054. qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
  7055. active_regions->aux.vpd_nvram =
  7056. qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
  7057. active_regions->aux.npiv_config_0_1 =
  7058. qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
  7059. active_regions->aux.npiv_config_2_3 =
  7060. qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
  7061. active_regions->aux.nvme_params =
  7062. qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
  7063. }
  7064. static int
  7065. qla27xx_compare_image_generation(
  7066. struct qla27xx_image_status *pri_image_status,
  7067. struct qla27xx_image_status *sec_image_status)
  7068. {
  7069. /* calculate generation delta as uint16 (this accounts for wrap) */
  7070. int16_t delta =
  7071. le16_to_cpu(pri_image_status->generation) -
  7072. le16_to_cpu(sec_image_status->generation);
  7073. ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
  7074. return delta;
  7075. }
  7076. void
  7077. qla28xx_get_aux_images(
  7078. struct scsi_qla_host *vha, struct active_regions *active_regions)
  7079. {
  7080. struct qla_hw_data *ha = vha->hw;
  7081. struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
  7082. bool valid_pri_image = false, valid_sec_image = false;
  7083. bool active_pri_image = false, active_sec_image = false;
  7084. if (!ha->flt_region_aux_img_status_pri) {
  7085. ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
  7086. goto check_sec_image;
  7087. }
  7088. qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
  7089. ha->flt_region_aux_img_status_pri,
  7090. sizeof(pri_aux_image_status) >> 2);
  7091. qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
  7092. if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
  7093. ql_dbg(ql_dbg_init, vha, 0x018b,
  7094. "Primary aux image signature (%#x) not valid\n",
  7095. le32_to_cpu(pri_aux_image_status.signature));
  7096. goto check_sec_image;
  7097. }
  7098. if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
  7099. ql_dbg(ql_dbg_init, vha, 0x018c,
  7100. "Primary aux image checksum failed\n");
  7101. goto check_sec_image;
  7102. }
  7103. valid_pri_image = true;
  7104. if (pri_aux_image_status.image_status_mask & 1) {
  7105. ql_dbg(ql_dbg_init, vha, 0x018d,
  7106. "Primary aux image is active\n");
  7107. active_pri_image = true;
  7108. }
  7109. check_sec_image:
  7110. if (!ha->flt_region_aux_img_status_sec) {
  7111. ql_dbg(ql_dbg_init, vha, 0x018a,
  7112. "Secondary aux image not addressed\n");
  7113. goto check_valid_image;
  7114. }
  7115. qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
  7116. ha->flt_region_aux_img_status_sec,
  7117. sizeof(sec_aux_image_status) >> 2);
  7118. qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
  7119. if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
  7120. ql_dbg(ql_dbg_init, vha, 0x018b,
  7121. "Secondary aux image signature (%#x) not valid\n",
  7122. le32_to_cpu(sec_aux_image_status.signature));
  7123. goto check_valid_image;
  7124. }
  7125. if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
  7126. ql_dbg(ql_dbg_init, vha, 0x018c,
  7127. "Secondary aux image checksum failed\n");
  7128. goto check_valid_image;
  7129. }
  7130. valid_sec_image = true;
  7131. if (sec_aux_image_status.image_status_mask & 1) {
  7132. ql_dbg(ql_dbg_init, vha, 0x018d,
  7133. "Secondary aux image is active\n");
  7134. active_sec_image = true;
  7135. }
  7136. check_valid_image:
  7137. if (valid_pri_image && active_pri_image &&
  7138. valid_sec_image && active_sec_image) {
  7139. if (qla27xx_compare_image_generation(&pri_aux_image_status,
  7140. &sec_aux_image_status) >= 0) {
  7141. qla28xx_component_status(active_regions,
  7142. &pri_aux_image_status);
  7143. } else {
  7144. qla28xx_component_status(active_regions,
  7145. &sec_aux_image_status);
  7146. }
  7147. } else if (valid_pri_image && active_pri_image) {
  7148. qla28xx_component_status(active_regions, &pri_aux_image_status);
  7149. } else if (valid_sec_image && active_sec_image) {
  7150. qla28xx_component_status(active_regions, &sec_aux_image_status);
  7151. }
  7152. ql_dbg(ql_dbg_init, vha, 0x018f,
  7153. "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
  7154. active_regions->aux.board_config,
  7155. active_regions->aux.vpd_nvram,
  7156. active_regions->aux.npiv_config_0_1,
  7157. active_regions->aux.npiv_config_2_3,
  7158. active_regions->aux.nvme_params);
  7159. }
  7160. void
  7161. qla27xx_get_active_image(struct scsi_qla_host *vha,
  7162. struct active_regions *active_regions)
  7163. {
  7164. struct qla_hw_data *ha = vha->hw;
  7165. struct qla27xx_image_status pri_image_status, sec_image_status;
  7166. bool valid_pri_image = false, valid_sec_image = false;
  7167. bool active_pri_image = false, active_sec_image = false;
  7168. if (!ha->flt_region_img_status_pri) {
  7169. ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
  7170. goto check_sec_image;
  7171. }
  7172. if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
  7173. ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
  7174. QLA_SUCCESS) {
  7175. WARN_ON_ONCE(true);
  7176. goto check_sec_image;
  7177. }
  7178. qla27xx_print_image(vha, "Primary image", &pri_image_status);
  7179. if (qla27xx_check_image_status_signature(&pri_image_status)) {
  7180. ql_dbg(ql_dbg_init, vha, 0x018b,
  7181. "Primary image signature (%#x) not valid\n",
  7182. le32_to_cpu(pri_image_status.signature));
  7183. goto check_sec_image;
  7184. }
  7185. if (qla27xx_image_status_checksum(&pri_image_status)) {
  7186. ql_dbg(ql_dbg_init, vha, 0x018c,
  7187. "Primary image checksum failed\n");
  7188. goto check_sec_image;
  7189. }
  7190. valid_pri_image = true;
  7191. if (pri_image_status.image_status_mask & 1) {
  7192. ql_dbg(ql_dbg_init, vha, 0x018d,
  7193. "Primary image is active\n");
  7194. active_pri_image = true;
  7195. }
  7196. check_sec_image:
  7197. if (!ha->flt_region_img_status_sec) {
  7198. ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
  7199. goto check_valid_image;
  7200. }
  7201. qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
  7202. ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
  7203. qla27xx_print_image(vha, "Secondary image", &sec_image_status);
  7204. if (qla27xx_check_image_status_signature(&sec_image_status)) {
  7205. ql_dbg(ql_dbg_init, vha, 0x018b,
  7206. "Secondary image signature (%#x) not valid\n",
  7207. le32_to_cpu(sec_image_status.signature));
  7208. goto check_valid_image;
  7209. }
  7210. if (qla27xx_image_status_checksum(&sec_image_status)) {
  7211. ql_dbg(ql_dbg_init, vha, 0x018c,
  7212. "Secondary image checksum failed\n");
  7213. goto check_valid_image;
  7214. }
  7215. valid_sec_image = true;
  7216. if (sec_image_status.image_status_mask & 1) {
  7217. ql_dbg(ql_dbg_init, vha, 0x018d,
  7218. "Secondary image is active\n");
  7219. active_sec_image = true;
  7220. }
  7221. check_valid_image:
  7222. if (valid_pri_image && active_pri_image)
  7223. active_regions->global = QLA27XX_PRIMARY_IMAGE;
  7224. if (valid_sec_image && active_sec_image) {
  7225. if (!active_regions->global ||
  7226. qla27xx_compare_image_generation(
  7227. &pri_image_status, &sec_image_status) < 0) {
  7228. active_regions->global = QLA27XX_SECONDARY_IMAGE;
  7229. }
  7230. }
  7231. ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
  7232. active_regions->global == QLA27XX_DEFAULT_IMAGE ?
  7233. "default (boot/fw)" :
  7234. active_regions->global == QLA27XX_PRIMARY_IMAGE ?
  7235. "primary" :
  7236. active_regions->global == QLA27XX_SECONDARY_IMAGE ?
  7237. "secondary" : "invalid",
  7238. active_regions->global);
  7239. }
  7240. bool qla24xx_risc_firmware_invalid(uint32_t *dword)
  7241. {
  7242. return
  7243. !(dword[4] | dword[5] | dword[6] | dword[7]) ||
  7244. !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
  7245. }
/*
 * qla24xx_load_risc_flash() - Load RISC firmware into the adapter from
 * flash, segment by segment, staging each fragment through the request
 * ring buffer; on 27xx/28xx parts also read the trailing firmware-dump
 * (fwdt) templates that follow the code segments.
 *
 * @vha: adapter host structure.
 * @srisc_addr: output; set to the RISC address of the first segment.
 * @faddr: flash address (dword units) of the firmware image to load.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.  Note that a template
 * read/validate failure is NOT fatal: the failed template is discarded
 * and QLA_SUCCESS is still returned.
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
	int rval;
	uint templates, segments, fragment;
	ulong i;
	uint j;
	ulong dlen;
	uint32_t *dcode;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	struct qla_hw_data *ha = vha->hw;
	/* Request ring doubles as the DMA-able staging buffer. */
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "FW: Loading firmware from flash (%x).\n", faddr);

	/* Peek at the header to reject blank/erased flash. */
	dcode = (uint32_t *)req->ring;
	qla24xx_read_flash_data(vha, dcode, faddr, 8);
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x008c,
		    "Unable to verify the integrity of flash firmware "
		    "image.\n");
		ql_log(ql_log_fatal, vha, 0x008d,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);

		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x008d,
		    "-> Loading segment %u...\n", j);

		/* Segment header: dword 2 = load address, dword 3 = size. */
		qla24xx_read_flash_data(vha, dcode, faddr, 10);
		risc_addr = be32_to_cpu((__force __be32)dcode[2]);
		risc_size = be32_to_cpu((__force __be32)dcode[3]);
		if (!*srisc_addr) {
			/* First segment: remember start address and attributes. */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu((__force __be32)dcode[9]);
		}

		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x008e,
			    "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
			    fragment, risc_addr, faddr, dlen);

			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			/* Flash data is big-endian; byte-swap for load_ram. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008f,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	/* fwdt templates only exist on 27xx/28xx parts. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	/* BIT_9 in the segment attributes signals a second template. */
	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		dcode = (uint32_t *)req->ring;
		qla24xx_read_flash_data(vha, dcode, faddr, 7);
		risc_size = be32_to_cpu((__force __be32)dcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0161,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, faddr, risc_size);
		/* All-zero or all-one size means no/erased template. */
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0162,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		faddr += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0163,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0164,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0165,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0166,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		/* Internal size claim must fit inside what was read. */
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0167,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0168,
		    "-> fwdt%u loaded template ok\n", j);

		/* +1 skips the trailing checksum dword. */
		faddr += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template failures are non-fatal; drop it and carry on. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}
  7368. #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
/*
 * qla2x00_load_risc() - Load RISC firmware on pre-FWI2 (ISP2xxx) parts
 * from a request_firmware() blob, one 16-bit-word segment at a time,
 * staging fragments through the request ring buffer.
 *
 * @vha: adapter host structure.
 * @srisc_addr: output; set to the RISC address of the first segment.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode;
	__be16 *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (__force __be16 *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	/* Words 4-7 must be neither all-zero nor all-ones. */
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	/* blob->segs is a zero-terminated list of segment load addresses. */
	seg = blob->segs;
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		/* Segment word 3 holds the segment length in words. */
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			/* Fragment size is capped by the DMA transfer size. */
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			/* Blob data is big-endian; byte-swap into the ring. */
			for (i = 0; i < wlen; i++)
				wcode[i] = swab16((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
/*
 * qla24xx_load_risc_blob() - Load RISC firmware on FWI2 parts from a
 * request_firmware() blob (dword-based image), then, on 27xx/28xx,
 * copy the trailing firmware-dump (fwdt) templates out of the blob.
 *
 * @vha: adapter host structure.
 * @srisc_addr: output; set to the RISC address of the first segment.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.  Template failures are
 * non-fatal: the failed template is discarded and QLA_SUCCESS is still
 * returned.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	uint templates, segments, fragment;
	uint32_t *dcode;
	ulong dlen;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	ulong i;
	uint j;
	struct fw_blob *blob;
	__be32 *fwcode;
	struct qla_hw_data *ha = vha->hw;
	/* Request ring doubles as the DMA-able staging buffer. */
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x0090,
	    "-> FW: Loading via request-firmware.\n");

	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0092,
		    "-> Firmware file not found.\n");
		return QLA_FUNCTION_FAILED;
	}

	fwcode = (__force __be32 *)blob->fw->data;
	dcode = (__force uint32_t *)fwcode;
	/* Reject blank/erased images before touching the hardware. */
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	dcode = (uint32_t *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x0096,
		    "-> Loading segment %u...\n", j);

		/* Segment header: dword 2 = load address, dword 3 = size. */
		risc_addr = be32_to_cpu(fwcode[2]);
		risc_size = be32_to_cpu(fwcode[3]);

		if (!*srisc_addr) {
			/* First segment: remember start address and attributes. */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(fwcode[9]);
		}

		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
			    fragment, risc_addr,
			    (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
			    dlen);

			/* Blob data is big-endian; byte-swap into the ring. */
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32((__force u32)fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	/* fwdt templates only exist on 27xx/28xx parts. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	/* BIT_9 in the segment attributes signals a second template. */
	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		risc_size = be32_to_cpu(fwcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0171,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
		    risc_size);
		/* All-zero or all-one size means no/erased template. */
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0172,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		fwcode += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0173,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0174,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		/*
		 * NOTE(review): template dwords are copied without swapping
		 * here, unlike the code segments above — presumably the
		 * template validator expects raw image order; confirm against
		 * qla27xx_fwdt_template_valid().
		 */
		dcode = fwdt->template;
		for (i = 0; i < risc_size; i++)
			dcode[i] = (__force u32)fwcode[i];

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0175,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0176,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		/* Internal size claim must fit inside what was read. */
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0177,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0178,
		    "-> fwdt%u loaded template ok\n", j);

		/* +1 skips the trailing checksum dword. */
		fwcode += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template failures are non-fatal; drop it and carry on. */
	vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}
  7585. int
  7586. qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
  7587. {
  7588. int rval;
  7589. if (ql2xfwloadbin == 1)
  7590. return qla81xx_load_risc(vha, srisc_addr);
  7591. /*
  7592. * FW Load priority:
  7593. * 1) Firmware via request-firmware interface (.bin file).
  7594. * 2) Firmware residing in flash.
  7595. */
  7596. rval = qla24xx_load_risc_blob(vha, srisc_addr);
  7597. if (rval == QLA_SUCCESS)
  7598. return rval;
  7599. return qla24xx_load_risc_flash(vha, srisc_addr,
  7600. vha->hw->flt_region_fw);
  7601. }
/*
 * qla81xx_load_risc() - Load RISC firmware with flash-first priority.
 *
 * @vha: adapter host structure.
 * @srisc_addr: output; set to the RISC start address by the loader used.
 *
 * ql2xfwloadbin == 2 skips straight to the request-firmware blob.
 * On 27xx/28xx the active (primary/secondary) flash image is selected
 * first; all paths fall back to the golden firmware image as a last
 * resort (limited operation, flags.running_gold_fw is set).
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
	 */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	/* 27xx/28xx: honor the active image selection from flash. */
	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	/* No golden image region? Then the blob result is final. */
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}
  7644. void
  7645. qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
  7646. {
  7647. int ret, retries;
  7648. struct qla_hw_data *ha = vha->hw;
  7649. if (ha->flags.pci_channel_io_perm_failure)
  7650. return;
  7651. if (!IS_FWI2_CAPABLE(ha))
  7652. return;
  7653. if (!ha->fw_major_version)
  7654. return;
  7655. if (!ha->flags.fw_started)
  7656. return;
  7657. ret = qla2x00_stop_firmware(vha);
  7658. for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
  7659. ret != QLA_INVALID_COMMAND && retries ; retries--) {
  7660. ha->isp_ops->reset_chip(vha);
  7661. if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
  7662. continue;
  7663. if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
  7664. continue;
  7665. ql_log(ql_log_info, vha, 0x8015,
  7666. "Attempting retry of stop-firmware command.\n");
  7667. ret = qla2x00_stop_firmware(vha);
  7668. }
  7669. QLA_FW_STOPPED(ha);
  7670. ha->flags.fw_init_done = 0;
  7671. }
  7672. int
  7673. qla24xx_configure_vhba(scsi_qla_host_t *vha)
  7674. {
  7675. int rval = QLA_SUCCESS;
  7676. int rval2;
  7677. uint16_t mb[MAILBOX_REGISTER_COUNT];
  7678. struct qla_hw_data *ha = vha->hw;
  7679. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  7680. if (!vha->vp_idx)
  7681. return -EINVAL;
  7682. rval = qla2x00_fw_ready(base_vha);
  7683. if (rval == QLA_SUCCESS) {
  7684. clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
  7685. qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
  7686. }
  7687. vha->flags.management_server_logged_in = 0;
  7688. /* Login to SNS first */
  7689. rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
  7690. BIT_1);
  7691. if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
  7692. if (rval2 == QLA_MEMORY_ALLOC_FAILED)
  7693. ql_dbg(ql_dbg_init, vha, 0x0120,
  7694. "Failed SNS login: loop_id=%x, rval2=%d\n",
  7695. NPH_SNS, rval2);
  7696. else
  7697. ql_dbg(ql_dbg_init, vha, 0x0103,
  7698. "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
  7699. "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
  7700. NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
  7701. return (QLA_FUNCTION_FAILED);
  7702. }
  7703. atomic_set(&vha->loop_down_timer, 0);
  7704. atomic_set(&vha->loop_state, LOOP_UP);
  7705. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  7706. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  7707. rval = qla2x00_loop_resync(base_vha);
  7708. return rval;
  7709. }
/* 84XX Support **************************************************************/

/* All known 84xx chip-state objects; entries are shared per PCI bus. */
static LIST_HEAD(qla_cs84xx_list);
/* Serializes lookup, insertion, and removal on qla_cs84xx_list. */
static DEFINE_MUTEX(qla_cs84xx_mutex);
  7713. static struct qla_chip_state_84xx *
  7714. qla84xx_get_chip(struct scsi_qla_host *vha)
  7715. {
  7716. struct qla_chip_state_84xx *cs84xx;
  7717. struct qla_hw_data *ha = vha->hw;
  7718. mutex_lock(&qla_cs84xx_mutex);
  7719. /* Find any shared 84xx chip. */
  7720. list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
  7721. if (cs84xx->bus == ha->pdev->bus) {
  7722. kref_get(&cs84xx->kref);
  7723. goto done;
  7724. }
  7725. }
  7726. cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
  7727. if (!cs84xx)
  7728. goto done;
  7729. kref_init(&cs84xx->kref);
  7730. spin_lock_init(&cs84xx->access_lock);
  7731. mutex_init(&cs84xx->fw_update_mutex);
  7732. cs84xx->bus = ha->pdev->bus;
  7733. list_add_tail(&cs84xx->list, &qla_cs84xx_list);
  7734. done:
  7735. mutex_unlock(&qla_cs84xx_mutex);
  7736. return cs84xx;
  7737. }
  7738. static void
  7739. __qla84xx_chip_release(struct kref *kref)
  7740. {
  7741. struct qla_chip_state_84xx *cs84xx =
  7742. container_of(kref, struct qla_chip_state_84xx, kref);
  7743. mutex_lock(&qla_cs84xx_mutex);
  7744. list_del(&cs84xx->list);
  7745. mutex_unlock(&qla_cs84xx_mutex);
  7746. kfree(cs84xx);
  7747. }
  7748. void
  7749. qla84xx_put_chip(struct scsi_qla_host *vha)
  7750. {
  7751. struct qla_hw_data *ha = vha->hw;
  7752. if (ha->cs84xx)
  7753. kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
  7754. }
  7755. static int
  7756. qla84xx_init_chip(scsi_qla_host_t *vha)
  7757. {
  7758. int rval;
  7759. uint16_t status[2];
  7760. struct qla_hw_data *ha = vha->hw;
  7761. mutex_lock(&ha->cs84xx->fw_update_mutex);
  7762. rval = qla84xx_verify_chip(vha, status);
  7763. mutex_unlock(&ha->cs84xx->fw_update_mutex);
  7764. return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
  7765. QLA_SUCCESS;
  7766. }
  7767. /* 81XX Support **************************************************************/
/*
 * qla81xx_nvram_config
 *	Read the VPD and NVRAM regions into cache, validate the NVRAM
 *	checksum/signature, substitute safe defaults when the NVRAM is
 *	corrupt, and derive the init control block (ICB) plus the driver's
 *	host-adapter parameters from it.
 *
 * Input:
 *	vha = adapter state pointer.
 *
 * Returns:
 *	QLA_SUCCESS  - NVRAM was valid and consumed as-is.
 *	1            - NVRAM was invalid; default parameters were applied.
 */
int
qla81xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_81xx *icb;
	struct nvram_81xx *nv;
	__le32 *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;
	uint32_t faddr;
	struct active_regions active_regions = { };

	rval = QLA_SUCCESS;
	icb = (struct init_cb_81xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;
	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
		ha->vpd_size = FA_VPD_SIZE_82XX;

	/* 27xx/28xx parts may have primary/secondary aux image regions. */
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
		qla28xx_get_aux_images(vha, &active_regions);

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;

	faddr = ha->flt_region_vpd;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec;
		ql_dbg(ql_dbg_init, vha, 0x0110,
		    "Loading %s nvram image.\n",
		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
		    "primary" : "secondary");
	}
	/* faddr is a word address; << 2 converts to a byte offset. */
	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);

	/* Get NVRAM data into cache and calculate checksum. */
	faddr = ha->flt_region_nvram;
	if (IS_QLA28XX(ha)) {
		if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_nvram_sec;
	}
	/*
	 * NOTE(review): unlike the VPD message above, this 0x0110 message
	 * is emitted unconditionally, i.e. also for non-28xx parts where
	 * active_regions is still zero-initialized.
	 */
	ql_dbg(ql_dbg_init, vha, 0x0110,
	    "Loading %s nvram image.\n",
	    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
	    "primary" : "secondary");
	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);

	/* 32-bit additive checksum over the whole NVRAM; valid data sums
	 * to zero. */
	dptr = (__force __le32 *)nv;
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
	    "Contents of NVRAM:\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_info, vha, 0x0073,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, le16_to_cpu(nv->nvram_version));
		ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
		ql_log(ql_log_info, vha, 0x0074,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = cpu_to_le16(2048);
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		/* Default (invalid-for-fabric) WWPN/WWNN, varied by port. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(180);
		/* Default FCoE enode MAC, varied by port. */
		nv->enode_mac[0] = 0x00;
		nv->enode_mac[1] = 0xC0;
		nv->enode_mac[2] = 0xDD;
		nv->enode_mac[3] = 0x04;
		nv->enode_mac[4] = 0x05;
		nv->enode_mac[5] = 0x06 + ha->port_no + 1;

		/* Flag the fallback so the caller knows defaults were used. */
		rval = 1;
	}

	/* T10-PI needs an 8-byte-aligned frame payload size. */
	if (IS_T10_PI_CAPABLE(ha))
		nv->frame_payload_size &= cpu_to_le16(~7);

	qlt_81xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_5 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;

	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
		icb->enode_mac[0] = 0x00;
		icb->enode_mac[1] = 0xC0;
		icb->enode_mac[2] = 0xDD;
		icb->enode_mac[3] = 0x04;
		icb->enode_mac[4] = 0x05;
		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
	}

	/* Use extended-initialization control block. */
	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);

	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLE8XXX");

	qlt_81xx_config_nvram_stage2(vha, icb);

	/* Use alternate WWN? */
	if (nv->host_p & cpu_to_le32(BIT_15)) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* SCM support is advertised via a cleared enhanced-features bit. */
	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
		if ((nv->enhanced_features & BIT_7) == 0)
			ha->flags.scm_supported_a = 1;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 *	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* if not running MSI-X we need handshaking on interrupts */
	if (!vha->hw->flags.msix_enabled &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
		icb->firmware_options_2 |= cpu_to_le32(BIT_22);

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	vha->flags.process_response_queue = 0;
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x0075,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode,
		    ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
		vha->flags.process_response_queue = 1;
	}

	/* enable RIDA Format2 */
	icb->firmware_options_3 |= cpu_to_le32(BIT_0);

	/* N2N: driver will initiate Login instead of FW */
	icb->firmware_options_3 |= cpu_to_le32(BIT_8);

	/* Determine NVMe/FCP priority for target ports */
	ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0076,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
/*
 * qla82xx_restart_isp
 *	Re-initialize the rings, wait for firmware ready, re-enable
 *	interrupts and tracing (FCE/EFT), then abort/resync all vports.
 *
 * Input:
 *	vha = adapter state pointer.
 *
 * Returns:
 *	0 on success; otherwise the failing step's status.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp, *tvp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		/* Re-arm the FCE trace buffer if one was allocated. */
		if (ha->fce) {
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		/* Re-arm the EFT trace buffer if one was allocated. */
		if (ha->eft) {
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		/*
		 * Abort ISP on every vport. The lock is dropped around
		 * qla2x00_vp_abort_isp(); vref_count pins the vport while
		 * it is unlocked.
		 */
		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
  8113. /*
  8114. * qla24xx_get_fcp_prio
  8115. * Gets the fcp cmd priority value for the logged in port.
  8116. * Looks for a match of the port descriptors within
  8117. * each of the fcp prio config entries. If a match is found,
  8118. * the tag (priority) value is returned.
  8119. *
  8120. * Input:
  8121. * vha = scsi host structure pointer.
  8122. * fcport = port structure pointer.
  8123. *
  8124. * Return:
  8125. * non-zero (if found)
  8126. * -1 (if not found)
  8127. *
  8128. * Context:
  8129. * Kernel context
  8130. */
  8131. static int
  8132. qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
  8133. {
  8134. int i, entries;
  8135. uint8_t pid_match, wwn_match;
  8136. int priority;
  8137. uint32_t pid1, pid2;
  8138. uint64_t wwn1, wwn2;
  8139. struct qla_fcp_prio_entry *pri_entry;
  8140. struct qla_hw_data *ha = vha->hw;
  8141. if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
  8142. return -1;
  8143. priority = -1;
  8144. entries = ha->fcp_prio_cfg->num_entries;
  8145. pri_entry = &ha->fcp_prio_cfg->entry[0];
  8146. for (i = 0; i < entries; i++) {
  8147. pid_match = wwn_match = 0;
  8148. if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
  8149. pri_entry++;
  8150. continue;
  8151. }
  8152. /* check source pid for a match */
  8153. if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
  8154. pid1 = pri_entry->src_pid & INVALID_PORT_ID;
  8155. pid2 = vha->d_id.b24 & INVALID_PORT_ID;
  8156. if (pid1 == INVALID_PORT_ID)
  8157. pid_match++;
  8158. else if (pid1 == pid2)
  8159. pid_match++;
  8160. }
  8161. /* check destination pid for a match */
  8162. if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
  8163. pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
  8164. pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
  8165. if (pid1 == INVALID_PORT_ID)
  8166. pid_match++;
  8167. else if (pid1 == pid2)
  8168. pid_match++;
  8169. }
  8170. /* check source WWN for a match */
  8171. if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
  8172. wwn1 = wwn_to_u64(vha->port_name);
  8173. wwn2 = wwn_to_u64(pri_entry->src_wwpn);
  8174. if (wwn2 == (uint64_t)-1)
  8175. wwn_match++;
  8176. else if (wwn1 == wwn2)
  8177. wwn_match++;
  8178. }
  8179. /* check destination WWN for a match */
  8180. if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
  8181. wwn1 = wwn_to_u64(fcport->port_name);
  8182. wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
  8183. if (wwn2 == (uint64_t)-1)
  8184. wwn_match++;
  8185. else if (wwn1 == wwn2)
  8186. wwn_match++;
  8187. }
  8188. if (pid_match == 2 || wwn_match == 2) {
  8189. /* Found a matching entry */
  8190. if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
  8191. priority = pri_entry->tag;
  8192. break;
  8193. }
  8194. pri_entry++;
  8195. }
  8196. return priority;
  8197. }
  8198. /*
  8199. * qla24xx_update_fcport_fcp_prio
  8200. * Activates fcp priority for the logged in fc port
  8201. *
  8202. * Input:
  8203. * vha = scsi host structure pointer.
  8204. * fcp = port structure pointer.
  8205. *
  8206. * Return:
  8207. * QLA_SUCCESS or QLA_FUNCTION_FAILED
  8208. *
  8209. * Context:
  8210. * Kernel context.
  8211. */
  8212. int
  8213. qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
  8214. {
  8215. int ret;
  8216. int priority;
  8217. uint16_t mb[5];
  8218. if (fcport->port_type != FCT_TARGET ||
  8219. fcport->loop_id == FC_NO_LOOP_ID)
  8220. return QLA_FUNCTION_FAILED;
  8221. priority = qla24xx_get_fcp_prio(vha, fcport);
  8222. if (priority < 0)
  8223. return QLA_FUNCTION_FAILED;
  8224. if (IS_P3P_TYPE(vha->hw)) {
  8225. fcport->fcp_prio = priority & 0xf;
  8226. return QLA_SUCCESS;
  8227. }
  8228. ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
  8229. if (ret == QLA_SUCCESS) {
  8230. if (fcport->fcp_prio != priority)
  8231. ql_dbg(ql_dbg_user, vha, 0x709e,
  8232. "Updated FCP_CMND priority - value=%d loop_id=%d "
  8233. "port_id=%02x%02x%02x.\n", priority,
  8234. fcport->loop_id, fcport->d_id.b.domain,
  8235. fcport->d_id.b.area, fcport->d_id.b.al_pa);
  8236. fcport->fcp_prio = priority & 0xf;
  8237. } else
  8238. ql_dbg(ql_dbg_user, vha, 0x704f,
  8239. "Unable to update FCP_CMND priority - ret=0x%x for "
  8240. "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
  8241. fcport->d_id.b.domain, fcport->d_id.b.area,
  8242. fcport->d_id.b.al_pa);
  8243. return ret;
  8244. }
  8245. /*
  8246. * qla24xx_update_all_fcp_prio
  8247. * Activates fcp priority for all the logged in ports
  8248. *
  8249. * Input:
  8250. * ha = adapter block pointer.
  8251. *
  8252. * Return:
  8253. * QLA_SUCCESS or QLA_FUNCTION_FAILED
  8254. *
  8255. * Context:
  8256. * Kernel context.
  8257. */
  8258. int
  8259. qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
  8260. {
  8261. int ret;
  8262. fc_port_t *fcport;
  8263. ret = QLA_FUNCTION_FAILED;
  8264. /* We need to set priority for all logged in ports */
  8265. list_for_each_entry(fcport, &vha->vp_fcports, list)
  8266. ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
  8267. return ret;
  8268. }
/*
 * qla2xxx_create_qpair
 *	Allocate and wire up one request/response queue pair: pick a free
 *	qpair id and MSI-X vector, create the response queue then the
 *	request queue, and attach an srb mempool.
 *
 * Input:
 *	vha     = adapter state pointer.
 *	qos     = quality-of-service value passed to the request queue.
 *	vp_idx  = owning virtual-port index.
 *	startqp = whether to start the queues immediately.
 *
 * Returns:
 *	Pointer to the new qpair, or NULL on any failure (all partially
 *	acquired resources are unwound via the fail_* labels).
 */
struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
	int vp_idx, bool startqp)
{
	int rsp_id = 0;
	int req_id = 0;
	int i;
	struct qla_hw_data *ha = vha->hw;
	uint16_t qpair_id = 0;
	struct qla_qpair *qpair = NULL;
	struct qla_msix_entry *msix;

	/* Multi-queue requires FW support (attribute BIT_6) and MSI-X. */
	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
		ql_log(ql_log_warn, vha, 0x00181,
		    "FW/Driver is not multi-queue capable.\n");
		return NULL;
	}

	if (ql2xmqsupport || ql2xnvmeenable) {
		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
		if (qpair == NULL) {
			ql_log(ql_log_warn, vha, 0x0182,
			    "Failed to allocate memory for queue pair.\n");
			return NULL;
		}

		qpair->hw = vha->hw;
		qpair->vha = vha;
		qpair->qp_lock_ptr = &qpair->qp_lock;
		spin_lock_init(&qpair->qp_lock);
		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;

		/* Assign available que pair id */
		mutex_lock(&ha->mq_lock);
		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
		if (ha->num_qpairs >= ha->max_qpairs) {
			mutex_unlock(&ha->mq_lock);
			ql_log(ql_log_warn, vha, 0x0183,
			    "No resources to create additional q pair.\n");
			goto fail_qid_map;
		}
		ha->num_qpairs++;
		set_bit(qpair_id, ha->qpair_qid_map);
		ha->queue_pair_map[qpair_id] = qpair;
		qpair->id = qpair_id;
		qpair->vp_idx = vp_idx;
		qpair->fw_started = ha->flags.fw_started;
		INIT_LIST_HEAD(&qpair->hints_list);
		/* Inherit per-adapter settings from the base qpair. */
		qpair->chip_reset = ha->base_qpair->chip_reset;
		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
		qpair->enable_explicit_conf =
		    ha->base_qpair->enable_explicit_conf;

		/* Claim the first unused MSI-X vector. */
		for (i = 0; i < ha->msix_count; i++) {
			msix = &ha->msix_entries[i];
			if (msix->in_use)
				continue;
			qpair->msix = msix;
			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
			    "Vector %x selected for qpair\n", msix->vector);
			break;
		}
		if (!qpair->msix) {
			ql_log(ql_log_warn, vha, 0x0184,
			    "Out of MSI-X vectors!.\n");
			goto fail_msix;
		}
		qpair->msix->in_use = 1;
		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
		qpair->pdev = ha->pdev;
		if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;

		mutex_unlock(&ha->mq_lock);

		/* Create response queue first */
		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
		if (!rsp_id) {
			ql_log(ql_log_warn, vha, 0x0185,
			    "Failed to create response queue.\n");
			goto fail_rsp;
		}

		qpair->rsp = ha->rsp_q_map[rsp_id];

		/* Create request queue */
		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
		    startqp);
		if (!req_id) {
			ql_log(ql_log_warn, vha, 0x0186,
			    "Failed to create request queue.\n");
			goto fail_req;
		}

		qpair->req = ha->req_q_map[req_id];
		qpair->rsp->req = qpair->req;
		qpair->rsp->qpair = qpair;

		if (!qpair->cpu_mapped)
			qla_cpu_update(qpair, raw_smp_processor_id());

		/* DIF/DIX offload requires FW attribute BIT_4. */
		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
			if (ha->fw_attributes & BIT_4)
				qpair->difdix_supported = 1;
		}

		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
		if (!qpair->srb_mempool) {
			ql_log(ql_log_warn, vha, 0xd036,
			    "Failed to create srb mempool for qpair %d\n",
			    qpair->id);
			goto fail_mempool;
		}

		/* Mark as online */
		qpair->online = 1;

		if (!vha->flags.qpairs_available)
			vha->flags.qpairs_available = 1;

		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
		ql_dbg(ql_dbg_init, vha, 0x0187,
		    "Request/Response queue pair created, id %d\n",
		    qpair->id);
	}
	return qpair;

/* Unwind in reverse order of acquisition. */
fail_mempool:
fail_req:
	qla25xx_delete_rsp_que(vha, qpair->rsp);
fail_rsp:
	mutex_lock(&ha->mq_lock);
	qpair->msix->in_use = 0;
	list_del(&qpair->qp_list_elem);
	if (list_empty(&vha->qp_list))
		vha->flags.qpairs_available = 0;
fail_msix:
	ha->queue_pair_map[qpair_id] = NULL;
	clear_bit(qpair_id, ha->qpair_qid_map);
	ha->num_qpairs--;
	mutex_unlock(&ha->mq_lock);
fail_qid_map:
	kfree(qpair);
	return NULL;
}
  8398. int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
  8399. {
  8400. int ret = QLA_FUNCTION_FAILED;
  8401. struct qla_hw_data *ha = qpair->hw;
  8402. qpair->delete_in_progress = 1;
  8403. ret = qla25xx_delete_req_que(vha, qpair->req);
  8404. if (ret != QLA_SUCCESS)
  8405. goto fail;
  8406. ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
  8407. if (ret != QLA_SUCCESS)
  8408. goto fail;
  8409. mutex_lock(&ha->mq_lock);
  8410. ha->queue_pair_map[qpair->id] = NULL;
  8411. clear_bit(qpair->id, ha->qpair_qid_map);
  8412. ha->num_qpairs--;
  8413. list_del(&qpair->qp_list_elem);
  8414. if (list_empty(&vha->qp_list)) {
  8415. vha->flags.qpairs_available = 0;
  8416. vha->flags.qpairs_req_created = 0;
  8417. vha->flags.qpairs_rsp_created = 0;
  8418. }
  8419. mempool_destroy(qpair->srb_mempool);
  8420. kfree(qpair);
  8421. mutex_unlock(&ha->mq_lock);
  8422. return QLA_SUCCESS;
  8423. fail:
  8424. return ret;
  8425. }
  8426. uint64_t
  8427. qla2x00_count_set_bits(uint32_t num)
  8428. {
  8429. /* Brian Kernighan's Algorithm */
  8430. u64 count = 0;
  8431. while (num) {
  8432. num &= (num - 1);
  8433. count++;
  8434. }
  8435. return count;
  8436. }
  8437. uint64_t
  8438. qla2x00_get_num_tgts(scsi_qla_host_t *vha)
  8439. {
  8440. fc_port_t *f, *tf;
  8441. u64 count = 0;
  8442. f = NULL;
  8443. tf = NULL;
  8444. list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
  8445. if (f->port_type != FCT_TARGET)
  8446. continue;
  8447. count++;
  8448. }
  8449. return count;
  8450. }
  8451. int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
  8452. {
  8453. scsi_qla_host_t *vha = shost_priv(host);
  8454. fc_port_t *fcport = NULL;
  8455. unsigned long int_flags;
  8456. if (flags & QLA2XX_HW_ERROR)
  8457. vha->hw_err_cnt = 0;
  8458. if (flags & QLA2XX_SHT_LNK_DWN)
  8459. vha->short_link_down_cnt = 0;
  8460. if (flags & QLA2XX_INT_ERR)
  8461. vha->interface_err_cnt = 0;
  8462. if (flags & QLA2XX_CMD_TIMEOUT)
  8463. vha->cmd_timeout_cnt = 0;
  8464. if (flags & QLA2XX_RESET_CMD_ERR)
  8465. vha->reset_cmd_err_cnt = 0;
  8466. if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
  8467. spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
  8468. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  8469. fcport->tgt_short_link_down_cnt = 0;
  8470. fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
  8471. }
  8472. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
  8473. }
  8474. vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
  8475. return 0;
  8476. }
/* Stats "start" request: implemented as a reset of the selected counters. */
int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}
/* Stats "stop" request: implemented as a reset of the selected counters. */
int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
{
	return qla2xxx_reset_stats(host, flags);
}
  8485. int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
  8486. void *data, u64 size)
  8487. {
  8488. scsi_qla_host_t *vha = shost_priv(host);
  8489. struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
  8490. struct ql_vnd_stats *rsp_data = &resp->stats;
  8491. u64 ini_entry_count = 0;
  8492. u64 i = 0;
  8493. u64 entry_count = 0;
  8494. u64 num_tgt = 0;
  8495. u32 tmp_stat_type = 0;
  8496. fc_port_t *fcport = NULL;
  8497. unsigned long int_flags;
  8498. /* Copy stat type to work on it */
  8499. tmp_stat_type = flags;
  8500. if (tmp_stat_type & BIT_17) {
  8501. num_tgt = qla2x00_get_num_tgts(vha);
  8502. /* unset BIT_17 */
  8503. tmp_stat_type &= ~(1 << 17);
  8504. }
  8505. ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
  8506. entry_count = ini_entry_count + num_tgt;
  8507. rsp_data->entry_count = entry_count;
  8508. i = 0;
  8509. if (flags & QLA2XX_HW_ERROR) {
  8510. rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
  8511. rsp_data->entry[i].tgt_num = 0x0;
  8512. rsp_data->entry[i].cnt = vha->hw_err_cnt;
  8513. i++;
  8514. }
  8515. if (flags & QLA2XX_SHT_LNK_DWN) {
  8516. rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
  8517. rsp_data->entry[i].tgt_num = 0x0;
  8518. rsp_data->entry[i].cnt = vha->short_link_down_cnt;
  8519. i++;
  8520. }
  8521. if (flags & QLA2XX_INT_ERR) {
  8522. rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
  8523. rsp_data->entry[i].tgt_num = 0x0;
  8524. rsp_data->entry[i].cnt = vha->interface_err_cnt;
  8525. i++;
  8526. }
  8527. if (flags & QLA2XX_CMD_TIMEOUT) {
  8528. rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
  8529. rsp_data->entry[i].tgt_num = 0x0;
  8530. rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
  8531. i++;
  8532. }
  8533. if (flags & QLA2XX_RESET_CMD_ERR) {
  8534. rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
  8535. rsp_data->entry[i].tgt_num = 0x0;
  8536. rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
  8537. i++;
  8538. }
  8539. /* i will continue from previous loop, as target
  8540. * entries are after initiator
  8541. */
  8542. if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
  8543. spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
  8544. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  8545. if (fcport->port_type != FCT_TARGET)
  8546. continue;
  8547. if (!fcport->rport)
  8548. continue;
  8549. rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
  8550. rsp_data->entry[i].tgt_num = fcport->rport->number;
  8551. rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
  8552. i++;
  8553. }
  8554. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
  8555. }
  8556. resp->status = EXT_STATUS_OK;
  8557. return 0;
  8558. }
  8559. int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
  8560. struct fc_rport *rport, void *data, u64 size)
  8561. {
  8562. struct ql_vnd_tgt_stats_resp *tgt_data = data;
  8563. fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
  8564. tgt_data->status = 0;
  8565. tgt_data->stats.entry_count = 1;
  8566. tgt_data->stats.entry[0].stat_type = flags;
  8567. tgt_data->stats.entry[0].tgt_num = rport->number;
  8568. tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
  8569. return 0;
  8570. }
  8571. int qla2xxx_disable_port(struct Scsi_Host *host)
  8572. {
  8573. scsi_qla_host_t *vha = shost_priv(host);
  8574. vha->hw->flags.port_isolated = 1;
  8575. if (qla2x00_isp_reg_stat(vha->hw)) {
  8576. ql_log(ql_log_info, vha, 0x9006,
  8577. "PCI/Register disconnect, exiting.\n");
  8578. qla_pci_set_eeh_busy(vha);
  8579. return FAILED;
  8580. }
  8581. if (qla2x00_chip_is_down(vha))
  8582. return 0;
  8583. if (vha->flags.online) {
  8584. qla2x00_abort_isp_cleanup(vha);
  8585. qla2x00_wait_for_sess_deletion(vha);
  8586. }
  8587. return 0;
  8588. }
  8589. int qla2xxx_enable_port(struct Scsi_Host *host)
  8590. {
  8591. scsi_qla_host_t *vha = shost_priv(host);
  8592. if (qla2x00_isp_reg_stat(vha->hw)) {
  8593. ql_log(ql_log_info, vha, 0x9001,
  8594. "PCI/Register disconnect, exiting.\n");
  8595. qla_pci_set_eeh_busy(vha);
  8596. return FAILED;
  8597. }
  8598. vha->hw->flags.port_isolated = 0;
  8599. /* Set the flag to 1, so that isp_abort can proceed */
  8600. vha->flags.online = 1;
  8601. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  8602. qla2xxx_wake_dpc(vha);
  8603. return 0;
  8604. }