// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <[email protected]>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <[email protected]>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <[email protected]>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode, and enabled back "
	"on disabling target mode; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
	"Allow the user to control IRQ placement via smp_affinity. "
	"Valid with qlini_mode=disabled. "
	"1 (default): enabled; 0: disabled.");
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0    /* simple task attribute */
#define FCP_PTA_HEADQ       1    /* head of queue task attribute */
#define FCP_PTA_ORDERED     2    /* ordered task attribute */
#define FCP_PTA_ACA         4    /* auto. contingent allegiance */
#define FCP_PTA_MASK        7    /* mask for task attribute field */
#define FCP_PRI_SHIFT       3    /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80 /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under the HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time
 * those functions are called:
 *
 *  - Either the context is IRQ and only the IRQ handler can modify HW data,
 *    including rings related fields,
 *
 *  - Or access to target mode variables from struct qla_tgt doesn't
 *    cross those functions' boundaries, except tgt_stop, which is
 *    additionally protected by irq_cmd_count.
 */
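
/*
 * Illustrative sketch of the contract described above (assumes the usual
 * qla2xxx locking; not driver code): a caller that already holds
 * hardware_lock passes the *_locked flag as 1 and must not rely on ring
 * state cached across the call, since the helper may drop and retake the
 * lock internally:
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	...
 *	qlt_issue_marker(vha, 1);	(may unlock/relock hardware_lock)
 *	(re-read any ring indexes needed after this point)
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */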

/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the
 * store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
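
/*
 * Usage sketch (illustrative; "sess->generation" is a hypothetical
 * destination, not necessarily a real field): by folding the store and
 * the wmb() into one helper, a caller writes
 *
 *	qlt_do_generation_tick(vha, &sess->generation);
 *
 * instead of the error-prone open-coded pair
 * "sess->generation = atomic_inc_return(...); wmb();".
 */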

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (!u)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}
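
/*
 * Summary of the flow above (editorial note, derived from the code): an
 * ATIO whose d_id cannot be resolved to a host is parked on
 * vha->unknown_atio_list by qlt_queue_unknown_atio(). The delayed work
 * (qlt_unknown_atio_work_fn, one jiffy later) then retries each entry:
 * requeue it to the now-known host, terminate the exchange if the entry
 * was aborted or the target is stopping, or reschedule the work and try
 * again later.
 */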

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);
			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (entry->u.isp24.vp_index != 0xFF) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}
}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}
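
/*
 * Editorial note on the event flow (summary, not driver code): the NACK is
 * deferred to process context. qla24xx_post_nack_work() queues a
 * QLA_EVT_NACK work item carrying a copy of the immediate-notify IOCB;
 * the driver's work handler later dispatches it to qla24xx_do_nack_work()
 * (below), which creates the session if needed and finally calls
 * qla24xx_async_notify_ack() to send the NOTIFY ACK via an SRB.
 */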

static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;

		if (sp->fcport->flags & FCF_FCSP_DEVICE) {
			ql_dbg(ql_dbg_edif, vha, 0x20ef,
			    "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
			    sp->fcport->port_name);
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_AUTH_PEND);
			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    sp->fcport->d_id.b24);
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
			    sp->fcport->d_id.b24, 0, sp->fcport);
		}
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		if (vha->hw->flags.edif_enabled &&
		    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
			fcport->flags |= FCF_FCSP_DEVICE;
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_nack_sp_done);
	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = NULL;

	if (!fcport || !fcport->vha || !fcport->vha->hw)
		return;

	ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return, the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}
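
/*
 * Lifecycle sketch (illustrative, derived from the functions around here):
 * because the count starts at zero, a pla with no linked session is freed
 * as soon as its last reference is dropped:
 *
 *	pla = qlt_plogi_ack_find_add(vha, &id, iocb);	ref_count == 0
 *	qlt_plogi_ack_link(vha, pla, sess, link);	ref_count == 1
 *	...
 *	qlt_plogi_ack_unref(vha, pla);			ref_count == 0:
 *							NACK sent, pla freed
 */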
  714. void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
  715. struct qlt_plogi_ack_t *pla)
  716. {
  717. struct imm_ntfy_from_isp *iocb = &pla->iocb;
  718. port_id_t port_id;
  719. uint16_t loop_id;
  720. fc_port_t *fcport = pla->fcport;
  721. BUG_ON(!pla->ref_count);
  722. pla->ref_count--;
  723. if (pla->ref_count)
  724. return;
  725. ql_dbg(ql_dbg_disc, vha, 0x5089,
  726. "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
  727. " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
  728. iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
  729. iocb->u.isp24.port_id[0],
  730. le16_to_cpu(iocb->u.isp24.nport_handle),
  731. iocb->u.isp24.exchange_address, iocb->ox_id);
  732. port_id.b.domain = iocb->u.isp24.port_id[2];
  733. port_id.b.area = iocb->u.isp24.port_id[1];
  734. port_id.b.al_pa = iocb->u.isp24.port_id[0];
  735. port_id.b.rsvd_1 = 0;
  736. loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
  737. fcport->loop_id = loop_id;
  738. fcport->d_id = port_id;
  739. if (iocb->u.isp24.status_subcode == ELS_PLOGI)
  740. qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
  741. else
  742. qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
  743. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  744. if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
  745. fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
  746. if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
  747. fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
  748. }
  749. list_del(&pla->list);
  750. kmem_cache_free(qla_tgt_plogi_cachep, pla);
  751. }
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
    struct imm_ntfy_from_isp *iocb = &pla->iocb;

    /* Inc ref_count first because link might already be pointing at pla */
    pla->ref_count++;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
        "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
        " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
        sess, link, sess->port_name,
        iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
        iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
        pla->ref_count, pla, link);

    if (link == QLT_PLOGI_LINK_CONFLICT) {
        switch (sess->disc_state) {
        case DSC_DELETED:
        case DSC_DELETE_PEND:
            pla->ref_count--;
            return;
        default:
            break;
        }
    }

    if (sess->plogi_link[link])
        qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

    if (link == QLT_PLOGI_LINK_SAME_WWN)
        pla->fcport = sess;

    sess->plogi_link[link] = pla;
}
typedef struct {
    /* These fields must be initialized by the caller */
    port_id_t id;
    /*
     * Number of cmds dropped while we were waiting for the
     * initiator to ack LOGO. Initialize to 1 if LOGO is
     * triggered by a command, otherwise to 0.
     */
    int cmd_count;

    /* These fields are used by the callee */
    struct list_head list;
} qlt_port_logo_t;
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
    qlt_port_logo_t *tmp;
    int res;

    if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
        res = 0;
        goto out;
    }

    mutex_lock(&vha->vha_tgt.tgt_mutex);

    list_for_each_entry(tmp, &vha->logo_list, list) {
        if (tmp->id.b24 == logo->id.b24) {
            tmp->cmd_count += logo->cmd_count;
            mutex_unlock(&vha->vha_tgt.tgt_mutex);
            return;
        }
    }

    list_add_tail(&logo->list, &vha->logo_list);

    mutex_unlock(&vha->vha_tgt.tgt_mutex);

    res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

    mutex_lock(&vha->vha_tgt.tgt_mutex);
    list_del(&logo->list);
    mutex_unlock(&vha->vha_tgt.tgt_mutex);

out:
    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
        "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
        logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
        logo->cmd_count, res);
}
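/*
 * Illustrative note (not part of the driver): logo_list acts as a
 * de-duplication table keyed by the 24-bit port ID. If a LOGO to some
 * port is already in flight and a second caller arrives with
 * logo->cmd_count = 1, the counts are merged and the second caller
 * returns without issuing another ELS, so at most one LOGO per port ID
 * is ever outstanding.
 */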
void qlt_free_session_done(struct work_struct *work)
{
    struct fc_port *sess = container_of(work, struct fc_port,
        free_work);
    struct qla_tgt *tgt = sess->tgt;
    struct scsi_qla_host *vha = sess->vha;
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    bool logout_started = false;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
    struct qlt_plogi_ack_t *own =
        sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

    ql_dbg(ql_dbg_disc, vha, 0xf084,
        "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
        " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
        __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
        sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
        sess->logout_on_delete, sess->keep_nport_handle,
        sess->send_els_logo);

    if (!IS_SW_RESV_ADDR(sess->d_id)) {
        qla2x00_mark_device_lost(vha, sess, 0);

        if (sess->send_els_logo) {
            qlt_port_logo_t logo;

            logo.id = sess->d_id;
            logo.cmd_count = 0;
            INIT_LIST_HEAD(&logo.list);
            if (!own)
                qlt_send_first_logo(vha, &logo);
            sess->send_els_logo = 0;
        }

        if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
            int rc;

            if (!own ||
                (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
                sess->logout_completed = 0;
                rc = qla2x00_post_async_logout_work(vha, sess,
                    NULL);
                if (rc != QLA_SUCCESS)
                    ql_log(ql_log_warn, vha, 0xf085,
                        "Schedule logo failed sess %p rc %d\n",
                        sess, rc);
                else
                    logout_started = true;
            } else if (own && (own->iocb.u.isp24.status_subcode ==
                ELS_PRLI) && ha->flags.rida_fmt2) {
                rc = qla2x00_post_async_prlo_work(vha, sess,
                    NULL);
                if (rc != QLA_SUCCESS)
                    ql_log(ql_log_warn, vha, 0xf085,
                        "Schedule PRLO failed sess %p rc %d\n",
                        sess, rc);
                else
                    logout_started = true;
            }
        } /* if sess->logout_on_delete */

        if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
            !(sess->nvme_flag & NVME_FLAG_DELETING)) {
            sess->nvme_flag |= NVME_FLAG_DELETING;
            qla_nvme_unregister_remote_port(sess);
        }

        if (ha->flags.edif_enabled &&
            (!own || (own &&
                own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
            sess->edif.authok = 0;
            if (!ha->flags.host_shutting_down) {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
                    __func__, sess->port_name);
                qla2x00_release_all_sadb(vha, sess);
            } else {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                    "%s bypassing release_all_sadb\n",
                    __func__);
            }

            qla_edif_clear_appdata(vha, sess);
            qla_edif_sess_down(vha, sess);
        }
    }

    /*
     * Release the target session for FC Nexus from fabric module code.
     */
    if (sess->se_sess != NULL)
        ha->tgt.tgt_ops->free_session(sess);

    if (logout_started) {
        bool traced = false;
        u16 cnt = 0;

        while (!READ_ONCE(sess->logout_completed)) {
            if (!traced) {
                ql_dbg(ql_dbg_disc, vha, 0xf086,
                    "%s: waiting for sess %p logout\n",
                    __func__, sess);
                traced = true;
            }
            msleep(100);
            cnt++;
            /*
             * The driver's logout timeout is 22 seconds, so loop
             * long enough (23 seconds) for the logout to complete
             * before advancing; otherwise a straggling logout can
             * interfere with the re-login attempt.
             */
            if (cnt > 230)
                break;
        }

        ql_dbg(ql_dbg_disc, vha, 0xf087,
            "%s: sess %p logout completed\n", __func__, sess);
    }

    if (sess->logo_ack_needed) {
        sess->logo_ack_needed = 0;
        qla24xx_async_notify_ack(vha, sess,
            (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
    }

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    if (sess->se_sess) {
        sess->se_sess = NULL;
        if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
            tgt->sess_count--;
    }

    qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
    sess->fw_login_state = DSC_LS_PORT_UNAVAIL;

    if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
        vha->fcport_count--;
        sess->login_succ = 0;
    }

    qla2x00_clear_loop_id(sess);

    if (sess->conflict) {
        sess->conflict->login_pause = 0;
        sess->conflict = NULL;
        if (!test_bit(UNLOADING, &vha->dpc_flags))
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
    }

    {
        struct qlt_plogi_ack_t *con =
            sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
        struct imm_ntfy_from_isp *iocb;

        own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

        if (con) {
            iocb = &con->iocb;
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
                "se_sess %p / sess %p port %8phC is gone,"
                " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
                sess->se_sess, sess, sess->port_name,
                own ? "releasing own PLOGI" : "no own PLOGI pending",
                own ? own->ref_count : -1,
                iocb->u.isp24.port_name, con->ref_count);
            qlt_plogi_ack_unref(vha, con);
            sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
        } else {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
                "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
                sess->se_sess, sess, sess->port_name,
                own ? "releasing own PLOGI" :
                "no own PLOGI pending",
                own ? own->ref_count : -1);
        }

        if (own) {
            sess->fw_login_state = DSC_LS_PLOGI_PEND;
            qlt_plogi_ack_unref(vha, own);
            sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
        }
    }

    sess->explicit_logout = 0;
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

    qla2x00_dfs_remove_rport(vha, sess);

    spin_lock_irqsave(&vha->work_lock, flags);
    sess->flags &= ~FCF_ASYNC_SENT;
    sess->deleted = QLA_SESS_DELETED;
    sess->free_pending = 0;
    spin_unlock_irqrestore(&vha->work_lock, flags);

    ql_dbg(ql_dbg_disc, vha, 0xf001,
        "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
        sess, sess->port_name, vha->fcport_count);

    if (tgt && (tgt->sess_count == 0))
        wake_up_all(&tgt->waitQ);

    if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
        !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
        (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
        switch (vha->host->active_mode) {
        case MODE_INITIATOR:
        case MODE_DUAL:
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
            break;
        case MODE_TARGET:
        default:
            /* no-op */
            break;
        }
    }

    if (vha->fcport_count == 0)
        wake_up_all(&vha->fcport_waitQ);
}
/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
    struct scsi_qla_host *vha = sess->vha;
    unsigned long flags;

    ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
        "%s sess %p for deletion %8phC\n",
        __func__, sess, sess->port_name);

    spin_lock_irqsave(&sess->vha->work_lock, flags);
    if (sess->free_pending) {
        spin_unlock_irqrestore(&sess->vha->work_lock, flags);
        return;
    }
    sess->free_pending = 1;
    /*
     * Use the FCF_ASYNC_SENT flag to block other cmds used in sess
     * management from being sent.
     */
    sess->flags |= FCF_ASYNC_SENT;
    sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
    spin_unlock_irqrestore(&sess->vha->work_lock, flags);

    if (sess->se_sess)
        vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

    qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
    sess->last_rscn_gen = sess->rscn_gen;
    sess->last_login_gen = sess->login_gen;

    queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
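/*
 * Illustrative note (not part of the driver): session teardown is
 * two-phase. qlt_unreg_sess() runs under the caller's lock, marks the
 * session with free_pending/FCF_ASYNC_SENT so concurrent callers bail
 * out, and queues free_work; the blocking work (waiting for LOGO/PRLO,
 * releasing the fabric-module session) then runs in
 * qlt_free_session_done() on the workqueue, outside any spinlock.
 */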
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess = NULL;
    uint16_t loop_id;
    int res = 0;
    struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
    unsigned long flags;

    loop_id = le16_to_cpu(n->u.isp24.nport_handle);
    if (loop_id == 0xFFFF) {
        /* Global event */
        atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    } else {
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    }

    ql_dbg(ql_dbg_tgt, vha, 0xe000,
        "Using sess for qla_tgt_reset: %p\n", sess);
    if (!sess) {
        res = -ESRCH;
        return res;
    }

    ql_dbg(ql_dbg_tgt, vha, 0xe047,
        "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
        "loop_id %d)\n", vha->host_no, sess, sess->port_name,
        mcmd, loop_id);

    return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
    if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
        sess->logout_on_delete = 0;
        sess->logo_ack_needed = 0;
        sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
    }
}
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
    struct qla_tgt *tgt = sess->tgt;
    unsigned long flags;
    u16 sec;

    switch (sess->disc_state) {
    case DSC_DELETE_PEND:
        return;
    case DSC_DELETED:
        if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
            !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
            if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
                wake_up_all(&tgt->waitQ);

            if (sess->vha->fcport_count == 0)
                wake_up_all(&sess->vha->fcport_waitQ);
            return;
        }
        break;
    case DSC_UPD_FCPORT:
        /*
         * This port is not done reporting to the upper layer.
         * Let it finish.
         */
        sess->next_disc_state = DSC_DELETE_PEND;
        sec = jiffies_to_msecs(jiffies -
            sess->jiffies_at_registration)/1000;
        if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
            sess->sec_since_registration = sec;
            ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
                "%s %8phC : Slow Rport registration(%d Sec)\n",
                __func__, sess->port_name, sec);
        }
        return;
    default:
        break;
    }

    spin_lock_irqsave(&sess->vha->work_lock, flags);
    if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
        spin_unlock_irqrestore(&sess->vha->work_lock, flags);
        return;
    }
    sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
    spin_unlock_irqrestore(&sess->vha->work_lock, flags);

    sess->prli_pend_timer = 0;
    qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

    qla24xx_chk_fcp_state(sess);

    ql_dbg(ql_log_warn, sess->vha, 0xe001,
        "Scheduling sess %p for deletion %8phC fc4_type %x\n",
        sess, sess->port_name, sess->fc4_type);

    WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
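/*
 * Illustrative note (not part of the driver): the disc_state switch
 * above makes scheduling idempotent. DSC_DELETE_PEND means a deletion
 * is already queued; DSC_DELETED with no pending PLOGI links means
 * there is nothing left to tear down (only waiters to wake); and
 * DSC_UPD_FCPORT defers the deletion via next_disc_state until rport
 * registration finishes.
 */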
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
    struct fc_port *sess;
    scsi_qla_host_t *vha = tgt->vha;

    list_for_each_entry(sess, &vha->vp_fcports, list) {
        if (sess->se_sess)
            qlt_schedule_sess_for_deletion(sess);
    }

    /* At this point tgt could be already dead */
}
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
    uint16_t *loop_id)
{
    struct qla_hw_data *ha = vha->hw;
    dma_addr_t gid_list_dma;
    struct gid_list_info *gid_list, *gid;
    int res, rc, i;
    uint16_t entries;

    gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
        &gid_list_dma, GFP_KERNEL);
    if (!gid_list) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
            "qla_target(%d): DMA Alloc failed of %u\n",
            vha->vp_idx, qla2x00_gid_list_size(ha));
        return -ENOMEM;
    }

    /* Get list of logged in devices */
    rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
    if (rc != QLA_SUCCESS) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
            "qla_target(%d): get_id_list() failed: %x\n",
            vha->vp_idx, rc);
        res = -EBUSY;
        goto out_free_id_list;
    }

    gid = gid_list;
    res = -ENOENT;
    for (i = 0; i < entries; i++) {
        if (gid->al_pa == s_id.al_pa &&
            gid->area == s_id.area &&
            gid->domain == s_id.domain) {
            *loop_id = le16_to_cpu(gid->loop_id);
            res = 0;
            break;
        }
        gid = (void *)gid + ha->gid_list_info_size;
    }

out_free_id_list:
    dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
        gid_list, gid_list_dma);
    return res;
}
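/*
 * Illustrative note (not part of the driver): gid_list entries are
 * variable-sized, which is why the walk above advances by
 * ha->gid_list_info_size rather than sizeof(*gid):
 *
 *     gid = (void *)gid + ha->gid_list_info_size;
 *
 * so the same loop works across ISP generations whose firmware uses
 * different entry layouts.
 */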
/*
 * Adds an extra ref to allow the hw lock to be dropped after adding
 * sess to the list. Caller must put it.
 */
static struct fc_port *qlt_create_sess(
    struct scsi_qla_host *vha,
    fc_port_t *fcport,
    bool local)
{
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess = fcport;
    unsigned long flags;

    if (vha->vha_tgt.qla_tgt->tgt_stop)
        return NULL;

    if (fcport->se_sess) {
        if (!kref_get_unless_zero(&sess->sess_kref)) {
            ql_dbg(ql_dbg_disc, vha, 0x20f6,
                "%s: kref_get_unless_zero failed for %8phC\n",
                __func__, sess->port_name);
            return NULL;
        }
        return fcport;
    }
    sess->tgt = vha->vha_tgt.qla_tgt;
    sess->local = local;

    /*
     * Under normal circumstances we want to logout from firmware when
     * session eventually ends and release corresponding nport handle.
     * In the exception cases (e.g. when new PLOGI is waiting) corresponding
     * code will adjust these flags as necessary.
     */
    sess->logout_on_delete = 1;
    sess->keep_nport_handle = 0;
    sess->logout_completed = 0;

    if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
        &fcport->port_name[0], sess) < 0) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
            "(%d) %8phC check_initiator_node_acl failed\n",
            vha->vp_idx, fcport->port_name);
        return NULL;
    } else {
        kref_init(&fcport->sess_kref);
        /*
         * Take an extra reference to ->sess_kref here to handle
         * fc_port access across ->tgt.sess_lock reacquire.
         */
        if (!kref_get_unless_zero(&sess->sess_kref)) {
            ql_dbg(ql_dbg_disc, vha, 0x20f7,
                "%s: kref_get_unless_zero failed for %8phC\n",
                __func__, sess->port_name);
            return NULL;
        }

        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (!IS_SW_RESV_ADDR(sess->d_id))
            vha->vha_tgt.qla_tgt->sess_count++;

        qlt_do_generation_tick(vha, &sess->generation);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    }

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
        "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
        sess, sess->se_sess, vha->vha_tgt.qla_tgt,
        vha->vha_tgt.qla_tgt->sess_count);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
        "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
        "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
        vha->vp_idx, local ? "local " : "", fcport->port_name,
        fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
        sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

    return sess;
}
/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid.
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct fc_port *sess = fcport;
    unsigned long flags;

    if (!vha->hw->tgt.tgt_ops)
        return;

    if (!tgt)
        return;

    spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
    if (tgt->tgt_stop) {
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        return;
    }
    if (!sess->se_sess) {
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        return;
    }

    if (max_gen - sess->generation < 0) {
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
            "Ignoring stale deletion request for se_sess %p / sess %p"
            " for port %8phC, req_gen %d, sess_gen %d\n",
            sess->se_sess, sess, sess->port_name, max_gen,
            sess->generation);
        return;
    }

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

    sess->local = 1;
    spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
    qlt_schedule_sess_for_deletion(sess);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
    struct qla_hw_data *ha = tgt->ha;
    unsigned long flags;
    int res;

    /*
     * We need to protect against a race where tgt is freed before or
     * inside wake_up().
     */
    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
        "tgt %p, sess_count=%d\n",
        tgt, tgt->sess_count);
    res = (tgt->sess_count == 0);
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

    return res;
}
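/*
 * Illustrative note (not part of the driver): this predicate is meant
 * to be used with wait_event_timeout(), as qlt_stop_phase1() does:
 *
 *     wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
 *
 * Taking sess_lock inside the predicate orders the read of sess_count
 * against the decrement in qlt_free_session_done() while sessions
 * drain.
 */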
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
    struct scsi_qla_host *vha = tgt->vha;
    struct qla_hw_data *ha = tgt->ha;
    unsigned long flags;

    mutex_lock(&ha->optrom_mutex);
    mutex_lock(&qla_tgt_mutex);

    if (tgt->tgt_stop || tgt->tgt_stopped) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
            "Already in tgt->tgt_stop or tgt_stopped state\n");
        mutex_unlock(&qla_tgt_mutex);
        mutex_unlock(&ha->optrom_mutex);
        return -EPERM;
    }

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
        vha->host_no, vha);
    /*
     * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
     * Lock is needed, because we can still get an incoming packet.
     */
    mutex_lock(&vha->vha_tgt.tgt_mutex);
    tgt->tgt_stop = 1;
    qlt_clear_tgt_db(tgt);
    mutex_unlock(&vha->vha_tgt.tgt_mutex);
    mutex_unlock(&qla_tgt_mutex);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
        "Waiting for sess works (tgt %p)", tgt);
    spin_lock_irqsave(&tgt->sess_work_lock, flags);
    do {
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
        flush_work(&tgt->sess_work);
        spin_lock_irqsave(&tgt->sess_work_lock, flags);
    } while (!list_empty(&tgt->sess_works_list));
    spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
        "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

    wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

    /* Big hammer */
    if (!ha->flags.host_shutting_down &&
        (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
        qlt_disable_vha(vha);

    /* Wait for sessions to clear out (just in case) */
    wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
    mutex_unlock(&ha->optrom_mutex);

    return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
    scsi_qla_host_t *vha = tgt->vha;

    if (tgt->tgt_stopped) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
            "Already in tgt->tgt_stopped state\n");
        dump_stack();
        return;
    }
    if (!tgt->tgt_stop) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
            "%s: phase1 stop is not completed\n", __func__);
        dump_stack();
        return;
    }

    mutex_lock(&tgt->ha->optrom_mutex);
    mutex_lock(&vha->vha_tgt.tgt_mutex);
    tgt->tgt_stop = 0;
    tgt->tgt_stopped = 1;
    mutex_unlock(&vha->vha_tgt.tgt_mutex);
    mutex_unlock(&tgt->ha->optrom_mutex);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
        tgt);

    switch (vha->qlini_mode) {
    case QLA2XXX_INI_MODE_EXCLUSIVE:
        vha->flags.online = 1;
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;
    default:
        break;
    }
}
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
    scsi_qla_host_t *vha = tgt->vha;
    void *node;
    u64 key = 0;
    u16 i;
    struct qla_qpair_hint *h;
    struct qla_hw_data *ha = vha->hw;

    if (!tgt->tgt_stop && !tgt->tgt_stopped)
        qlt_stop_phase1(tgt);

    if (!tgt->tgt_stopped)
        qlt_stop_phase2(tgt);

    for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
        unsigned long flags;

        h = &tgt->qphints[i];
        if (h->qpair) {
            spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
            list_del(&h->hint_elem);
            spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
            h->qpair = NULL;
        }
    }
    kfree(tgt->qphints);
    mutex_lock(&qla_tgt_mutex);
    list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
    mutex_unlock(&qla_tgt_mutex);

    btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
        btree_remove64(&tgt->lun_qpair_map, key);

    btree_destroy64(&tgt->lun_qpair_map);

    if (vha->vp_idx)
        if (ha->tgt.tgt_ops &&
            ha->tgt.tgt_ops->remove_target &&
            vha->vha_tgt.target_lport_ptr)
            ha->tgt.tgt_ops->remove_target(vha);

    vha->vha_tgt.qla_tgt = NULL;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
        "Release of tgt %p finished\n", tgt);

    kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
    const void *param, unsigned int param_size)
{
    struct qla_tgt_sess_work_param *prm;
    unsigned long flags;

    prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
    if (!prm) {
        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
            "qla_target(%d): Unable to create session "
            "work, command will be refused", 0);
        return -ENOMEM;
    }

    ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
        "Scheduling work (type %d, prm %p)"
        " to find session for param %p (size %d, tgt %p)\n",
        type, prm, param, param_size, tgt);

    prm->type = type;
    memcpy(&prm->tm_iocb, param, param_size);

    spin_lock_irqsave(&tgt->sess_work_lock, flags);
    list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
    spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

    schedule_work(&tgt->sess_work);

    return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
    struct imm_ntfy_from_isp *ntfy,
    uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
    uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
    struct scsi_qla_host *vha = qpair->vha;
    struct qla_hw_data *ha = vha->hw;
    request_t *pkt;
    struct nack_to_isp *nack;

    if (!ha->flags.fw_started)
        return;

    ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

    pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
    if (!pkt) {
        ql_dbg(ql_dbg_tgt, vha, 0xe049,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet\n", vha->vp_idx, __func__);
        return;
    }

    if (vha->vha_tgt.qla_tgt != NULL)
        vha->vha_tgt.qla_tgt->notify_ack_expected++;

    pkt->entry_type = NOTIFY_ACK_TYPE;
    pkt->entry_count = 1;

    nack = (struct nack_to_isp *)pkt;
    nack->ox_id = ntfy->ox_id;

    nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
    nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
    if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
        nack->u.isp24.flags = ntfy->u.isp24.flags &
            cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
    }
    nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
    nack->u.isp24.status = ntfy->u.isp24.status;
    nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
    nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
    nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
    nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
    nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
    nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
    nack->u.isp24.srr_reject_code = srr_reject_code;
    nack->u.isp24.srr_reject_code_expl = srr_explan;
    nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

    /* TODO qualify this with EDIF enable */
    if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
        (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
        nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
    }

    ql_dbg(ql_dbg_tgt, vha, 0xe005,
        "qla_target(%d): Sending 24xx Notify Ack %d\n",
        vha->vp_idx, nack->u.isp24.status);

    /* Memory Barrier */
    wmb();
    qla2x00_start_iocbs(vha, qpair->req);
}
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
    struct scsi_qla_host *vha = mcmd->vha;
    struct qla_hw_data *ha = vha->hw;
    struct abts_resp_to_24xx *resp;
    __le32 f_ctl;
    uint32_t h;
    uint8_t *p;
    int rc;
    struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
    struct qla_qpair *qpair = mcmd->qpair;

    ql_dbg(ql_dbg_tgt, vha, 0xe006,
        "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
        ha, mcmd->fc_tm_rsp);

    rc = qlt_check_reserve_free_req(qpair, 1);
    if (rc) {
        ql_dbg(ql_dbg_tgt, vha, 0xe04a,
            "qla_target(%d): %s failed: unable to allocate request packet\n",
            vha->vp_idx, __func__);
        return -EAGAIN;
    }

    resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
    memset(resp, 0, sizeof(*resp));

    h = qlt_make_handle(qpair);
    if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
        /*
         * CTIO type 7 from the firmware doesn't provide a way to
         * know the initiator's LOOP ID, hence we can't find
         * the session and, so, the command.
         */
        return -EAGAIN;
    } else {
        qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
    }

    resp->handle = make_handle(qpair->req->id, h);
    resp->entry_type = ABTS_RESP_24XX;
    resp->entry_count = 1;
    resp->nport_handle = abts->nport_handle;
    resp->vp_index = vha->vp_idx;
    resp->sof_type = abts->sof_type;
    resp->exchange_address = abts->exchange_address;
    resp->fcp_hdr_le = abts->fcp_hdr_le;
    f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
        F_CTL_LAST_SEQ | F_CTL_END_SEQ |
        F_CTL_SEQ_INITIATIVE);
    p = (uint8_t *)&f_ctl;
    resp->fcp_hdr_le.f_ctl[0] = *p++;
    resp->fcp_hdr_le.f_ctl[1] = *p++;
    resp->fcp_hdr_le.f_ctl[2] = *p;

    resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
    resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

    resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
    if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
        resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
        resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
        resp->payload.ba_acct.low_seq_cnt = 0x0000;
        resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
        resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
        resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
    } else {
        resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
        resp->payload.ba_rjt.reason_code =
            BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
        /* Other bytes are zero */
    }

    vha->vha_tgt.qla_tgt->abts_resp_expected++;

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);

    return rc;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
    struct abts_recv_from_24xx *abts, uint32_t status,
    bool ids_reversed)
{
    struct scsi_qla_host *vha = qpair->vha;
    struct qla_hw_data *ha = vha->hw;
    struct abts_resp_to_24xx *resp;
    __le32 f_ctl;
    uint8_t *p;

    ql_dbg(ql_dbg_tgt, vha, 0xe006,
        "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
        ha, abts, status);

    resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
        NULL);
    if (!resp) {
        ql_dbg(ql_dbg_tgt, vha, 0xe04a,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet", vha->vp_idx, __func__);
        return;
    }

    resp->entry_type = ABTS_RESP_24XX;
    resp->handle = QLA_TGT_SKIP_HANDLE;
    resp->entry_count = 1;
    resp->nport_handle = abts->nport_handle;
    resp->vp_index = vha->vp_idx;
    resp->sof_type = abts->sof_type;
    resp->exchange_address = abts->exchange_address;
    resp->fcp_hdr_le = abts->fcp_hdr_le;
    f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
        F_CTL_LAST_SEQ | F_CTL_END_SEQ |
        F_CTL_SEQ_INITIATIVE);
    p = (uint8_t *)&f_ctl;
    resp->fcp_hdr_le.f_ctl[0] = *p++;
    resp->fcp_hdr_le.f_ctl[1] = *p++;
    resp->fcp_hdr_le.f_ctl[2] = *p;
    if (ids_reversed) {
        resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
        resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
    } else {
        resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
        resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
    }
    resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
    if (status == FCP_TMF_CMPL) {
        resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
        resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
        resp->payload.ba_acct.low_seq_cnt = 0x0000;
        resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
        resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
        resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
    } else {
        resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
        resp->payload.ba_rjt.reason_code =
            BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
        /* Other bytes are zero */
    }

    vha->vha_tgt.qla_tgt->abts_resp_expected++;

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
    struct ctio7_to_24xx *ctio;
    u16 tmp;
    struct abts_recv_from_24xx *entry;

    ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
    if (ctio == NULL) {
        ql_dbg(ql_dbg_tgt, vha, 0xe04b,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet\n", vha->vp_idx, __func__);
        return;
    }

    if (mcmd)
        /* abts from remote port */
        entry = &mcmd->orig_iocb.abts;
    else
        /* abts from this driver. */
        entry = (struct abts_recv_from_24xx *)pkt;

    /*
     * On entry we have the firmware's response to an ABTS response that
     * we generated ourselves, so its ID fields are reversed.
     */
    ctio->entry_type = CTIO_TYPE7;
    ctio->entry_count = 1;
    ctio->nport_handle = entry->nport_handle;
    ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
    ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio->vp_index = vha->vp_idx;
    ctio->exchange_addr = entry->exchange_addr_to_abort;
    tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

    if (mcmd) {
        ctio->initiator_id = entry->fcp_hdr_le.s_id;

        if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
            tmp |= (mcmd->abort_io_attr << 9);
        else if (qpair->retry_term_cnt & 1)
            tmp |= (0x4 << 9);
    } else {
        ctio->initiator_id = entry->fcp_hdr_le.d_id;

        if (qpair->retry_term_cnt & 1)
            tmp |= (0x4 << 9);
    }

    ctio->u.status1.flags = cpu_to_le16(tmp);
    ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

    ql_dbg(ql_dbg_tgt, vha, 0xe007,
        "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
        le16_to_cpu(ctio->u.status1.flags),
        le16_to_cpu(ctio->u.status1.ox_id),
        (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);

    if (mcmd)
        qlt_build_abts_resp_iocb(mcmd);
    else
        qlt_24xx_send_abts_resp(qpair,
            (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
}
/*
 * Drop cmds for the given lun.
 * XXX only looks for cmds on the port through which the lun reset was
 *     received
 * XXX does not go through the list of other ports (which may have cmds
 *     for the same lun)
 */
static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
{
    struct qla_tgt_sess_op *op;
    struct qla_tgt_cmd *cmd;
    uint32_t key;
    unsigned long flags;

    key = sid_to_key(s_id);
    spin_lock_irqsave(&vha->cmd_list_lock, flags);
    list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
        uint32_t op_key;
        u64 op_lun;

        op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
        op_lun = scsilun_to_int(
            (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
        if (op_key == key && op_lun == lun)
            op->aborted = true;
    }

    list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
        uint32_t cmd_key;
        u64 cmd_lun;

        cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
        cmd_lun = scsilun_to_int(
            (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
        if (cmd_key == key && cmd_lun == lun)
            cmd->aborted = 1;
    }
    spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
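/*
 * Illustrative note (not part of the driver): sid_to_key() packs the
 * 3-byte S_ID into one scalar so a nexus match is a pair of integer
 * compares, conceptually something like:
 *
 *     key = (s_id.domain << 16) | (s_id.area << 8) | s_id.al_pa;
 *     match = (op_key == key) && (op_lun == lun);
 *
 * The exact bit layout lives in sid_to_key(); the point is that both
 * pending ATIOs and live commands are matched by the (S_ID, LUN) pair.
 */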
static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
    uint64_t unpacked_lun)
{
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct qla_qpair_hint *h = NULL;

    if (vha->flags.qpairs_available) {
        h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
        if (!h)
            h = &tgt->qphints[0];
    } else {
        h = &tgt->qphints[0];
    }

    return h;
}
static void qlt_do_tmr_work(struct work_struct *work)
{
    struct qla_tgt_mgmt_cmd *mcmd =
        container_of(work, struct qla_tgt_mgmt_cmd, work);
    struct qla_hw_data *ha = mcmd->vha->hw;
    int rc;
    uint32_t tag;
    unsigned long flags;

    switch (mcmd->tmr_func) {
    case QLA_TGT_ABTS:
        tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
        break;
    default:
        tag = 0;
        break;
    }

    rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
        mcmd->tmr_func, tag);

    if (rc != 0) {
        spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
        switch (mcmd->tmr_func) {
        case QLA_TGT_ABTS:
            mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
            qlt_build_abts_resp_iocb(mcmd);
            break;
        case QLA_TGT_LUN_RESET:
        case QLA_TGT_CLEAR_TS:
        case QLA_TGT_ABORT_TS:
        case QLA_TGT_CLEAR_ACA:
        case QLA_TGT_TARGET_RESET:
            qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
                qla_sam_status);
            break;
        case QLA_TGT_ABORT_ALL:
        case QLA_TGT_NEXUS_LOSS_SESS:
        case QLA_TGT_NEXUS_LOSS:
            qlt_send_notify_ack(mcmd->qpair,
                &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
            break;
        }
        spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

        ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
            "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
            mcmd->vha->vp_idx, rc);
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
    }
}
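/*
 * Illustrative note (not part of the driver): on handle_tmr() failure
 * the response type has to match how the TMR arrived: ABTS gets an
 * ABTS response IOCB, SCSI task-management functions (which arrived as
 * ATIOs) get a busy status, and ELS-derived functions get a notify
 * ack. mcmd is freed here only on this failure path; on success the
 * fabric module completes it via qlt_xmit_tm_rsp().
 */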
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
    struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_mgmt_cmd *mcmd;
    struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
    struct qla_tgt_cmd *abort_cmd;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
        "qla_target(%d): task abort (tag=%d)\n",
        vha->vp_idx, abts->exchange_addr_to_abort);

    mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
    if (mcmd == NULL) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
            "qla_target(%d): %s: Allocation of ABORT cmd failed",
            vha->vp_idx, __func__);
        return -ENOMEM;
    }
    memset(mcmd, 0, sizeof(*mcmd));
    mcmd->cmd_type = TYPE_TGT_TMCMD;
    mcmd->sess = sess;
    memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
    mcmd->reset_count = ha->base_qpair->chip_reset;
    mcmd->tmr_func = QLA_TGT_ABTS;
    mcmd->qpair = h->qpair;
    mcmd->vha = vha;

    /*
     * LUN is looked up by target-core internally based on the passed
     * abts->exchange_addr_to_abort tag.
     */
    mcmd->se_cmd.cpuid = h->cpuid;

    abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
        le32_to_cpu(abts->exchange_addr_to_abort));
    if (!abort_cmd) {
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
        return -EIO;
    }
    mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;

    if (abort_cmd->qpair) {
        mcmd->qpair = abort_cmd->qpair;
        mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
        mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
        mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
    }

    INIT_WORK(&mcmd->work, qlt_do_tmr_work);
    queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

    return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
    struct abts_recv_from_24xx *abts)
{
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess;
    uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
    be_id_t s_id;
    int rc;
    unsigned long flags;

    if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
            "qla_target(%d): ABTS: Abort Sequence not "
            "supported\n", vha->vp_idx);
        qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
            false);
        return;
    }

    if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
            "qla_target(%d): ABTS: Unknown Exchange "
            "Address received\n", vha->vp_idx);
        qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
            false);
        return;
    }

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
        "qla_target(%d): task abort (s_id=%x:%x:%x, "
        "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
        abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
        le32_to_cpu(abts->fcp_hdr_le.parameter));

    s_id = le_id_to_be(abts->fcp_hdr_le.s_id);

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
    if (!sess) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
            "qla_target(%d): task abort for non-existent session\n",
            vha->vp_idx);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

        qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
            false);
        return;
    }
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

    if (sess->deleted) {
        qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
            false);
        return;
    }

    rc = __qlt_24xx_handle_abts(vha, abts, sess);
    if (rc != 0) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
            "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
            vha->vp_idx, rc);
        qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
            false);
        return;
    }
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
    struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
    struct scsi_qla_host *ha = mcmd->vha;
    struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
    struct ctio7_to_24xx *ctio;
    uint16_t temp;

    ql_dbg(ql_dbg_tgt, ha, 0xe008,
        "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
        ha, atio, resp_code);

    ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
    if (ctio == NULL) {
        ql_dbg(ql_dbg_tgt, ha, 0xe04c,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet\n", ha->vp_idx, __func__);
        return;
    }

    ctio->entry_type = CTIO_TYPE7;
    ctio->entry_count = 1;
    ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
    ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
    ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio->vp_index = ha->vp_idx;
    ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
    ctio->exchange_addr = atio->u.isp24.exchange_addr;
    temp = (atio->u.isp24.attr << 9) |
        CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
    ctio->u.status1.flags = cpu_to_le16(temp);
    temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
    ctio->u.status1.ox_id = cpu_to_le16(temp);
    ctio->u.status1.scsi_status =
        cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
    ctio->u.status1.response_len = cpu_to_le16(8);
    ctio->u.status1.sense_data[0] = resp_code;

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(ha, qpair->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
    mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
    struct atio_from_isp *atio = &cmd->atio;
    struct ctio7_to_24xx *ctio;
    uint16_t temp;
    struct scsi_qla_host *vha = cmd->vha;

    ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
        "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
        "sense_key=%02x, asc=%02x, ascq=%02x",
        vha, atio, scsi_status, sense_key, asc, ascq);

    ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
    if (!ctio) {
        ql_dbg(ql_dbg_async, vha, 0x3067,
            "qla2x00t(%ld): %s failed: unable to allocate request packet",
            vha->host_no, __func__);
        goto out;
    }

    ctio->entry_type = CTIO_TYPE7;
    ctio->entry_count = 1;
    ctio->handle = QLA_TGT_SKIP_HANDLE;
    ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
    ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio->vp_index = vha->vp_idx;
    ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
    ctio->exchange_addr = atio->u.isp24.exchange_addr;
    temp = (atio->u.isp24.attr << 9) |
        CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
    ctio->u.status1.flags = cpu_to_le16(temp);
    temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
    ctio->u.status1.ox_id = cpu_to_le16(temp);
    ctio->u.status1.scsi_status =
        cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
    ctio->u.status1.response_len = cpu_to_le16(18);
    ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

    if (ctio->u.status1.residual != 0)
        ctio->u.status1.scsi_status |=
            cpu_to_le16(SS_RESIDUAL_UNDER);

    /* Fixed format sense data. */
    ctio->u.status1.sense_data[0] = 0x70;
    ctio->u.status1.sense_data[2] = sense_key;
    /* Additional sense length */
    ctio->u.status1.sense_data[7] = 0xa;
    /* ASC and ASCQ */
    ctio->u.status1.sense_data[12] = asc;
    ctio->u.status1.sense_data[13] = ascq;

    /* Memory Barrier */
    wmb();

    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);

out:
    return;
}
/* Callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
    struct scsi_qla_host *vha = mcmd->sess->vha;
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    struct qla_qpair *qpair = mcmd->qpair;
    bool free_mcmd = true;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
        "TM response mcmd (%p) status %#x state %#x",
        mcmd, mcmd->fc_tm_rsp, mcmd->flags);

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);

    if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
        /*
         * Either the port is not online or this request was from
         * a previous life; just abort the processing.
         */
        ql_dbg(ql_dbg_async, vha, 0xe100,
            "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
            vha->flags.online, qla2x00_reset_active(vha),
            mcmd->reset_count, qpair->chip_reset);
        ha->tgt.tgt_ops->free_mcmd(mcmd);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
        return;
    }

    if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
        switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
        case ELS_LOGO:
        case ELS_PRLO:
        case ELS_TPRLO:
            ql_dbg(ql_dbg_disc, vha, 0x2106,
                "TM response logo %8phC status %#x state %#x",
                mcmd->sess->port_name, mcmd->fc_tm_rsp,
                mcmd->flags);
            qlt_schedule_sess_for_deletion(mcmd->sess);
            break;
        default:
            qlt_send_notify_ack(vha->hw->base_qpair,
                &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
            break;
        }
    } else {
        if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
            qlt_build_abts_resp_iocb(mcmd);
            free_mcmd = false;
        } else
            qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
                mcmd->fc_tm_rsp);
    }
    /*
     * Make the callback for ->free_mcmd() to queue_work() and invoke
     * target_put_sess_cmd() to drop cmd_kref to 1. The final
     * target_put_sess_cmd() call will be made from TFO->check_stop_free()
     * -> tcm_qla2xxx_check_stop_free() to release the TMR-associated se_cmd
     * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
     * qlt_xmit_tm_rsp() returns here.
     */
    if (free_mcmd)
        ha->tgt.tgt_ops->free_mcmd(mcmd);

    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
    struct qla_tgt_cmd *cmd = prm->cmd;

    BUG_ON(cmd->sg_cnt == 0);

    prm->sg = (struct scatterlist *)cmd->sg;
    prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
        cmd->sg_cnt, cmd->dma_data_direction);
    if (unlikely(prm->seg_cnt == 0))
        goto out_err;

    prm->cmd->sg_mapped = 1;

    if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
        /*
         * If greater than four sg entries then we need to allocate
         * the continuation entries
         */
        if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
            prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
                QLA_TGT_DATASEGS_PER_CMD_24XX,
                QLA_TGT_DATASEGS_PER_CONT_24XX);
    } else {
        /* DIF */
        if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
            (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
            prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
            prm->tot_dsds = prm->seg_cnt;
        } else
            prm->tot_dsds = prm->seg_cnt;

        if (cmd->prot_sg_cnt) {
            prm->prot_sg = cmd->prot_sg;
            prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
                cmd->prot_sg, cmd->prot_sg_cnt,
                cmd->dma_data_direction);
            if (unlikely(prm->prot_seg_cnt == 0))
                goto out_err;

            if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
                (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
                /* DIF bundling is not supported here */
                prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
                    cmd->blk_sz);
                prm->tot_dsds += prm->prot_seg_cnt;
            } else
                prm->tot_dsds += prm->prot_seg_cnt;
        }
    }

    return 0;

out_err:
    ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
        "qla_target(%d): PCI mapping failed: sg_cnt=%d",
        0, prm->cmd->sg_cnt);
    return -1;
}
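/*
 * Illustrative note (not part of the driver): the continuation math
 * above follows directly from the IOCB layout. The first CTIO carries
 * QLA_TGT_DATASEGS_PER_CMD_24XX descriptors and each continuation
 * entry carries QLA_TGT_DATASEGS_PER_CONT_24XX. With hypothetical
 * values of seg_cnt = 12, 1 descriptor per command entry and 5 per
 * continuation, the function would reserve
 *
 *     DIV_ROUND_UP(12 - 1, 5) = 3 extra request entries.
 */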
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
    struct qla_hw_data *ha;
    struct qla_qpair *qpair;

    if (!cmd->sg_mapped)
        return;

    qpair = cmd->qpair;

    dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
        cmd->dma_data_direction);
    cmd->sg_mapped = 0;

    if (cmd->prot_sg_cnt)
        dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
            cmd->dma_data_direction);

    if (!cmd->ctx)
        return;

    ha = vha->hw;
    if (cmd->ctx_dsd_alloced)
        qla2x00_clean_dsd_pool(ha, cmd->ctx);

    dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
    uint32_t req_cnt)
{
    uint32_t cnt;
    struct req_que *req = qpair->req;

    if (req->cnt < (req_cnt + 2)) {
        cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
            rd_reg_dword_relaxed(req->req_q_out));

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length - (req->ring_index - cnt);

        if (unlikely(req->cnt < (req_cnt + 2)))
            return -EAGAIN;
    }

    req->cnt -= req_cnt;

    return 0;
}
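/*
 * Illustrative note (not part of the driver): free-slot accounting is
 * circular-buffer arithmetic against the consumer index. With
 * hypothetical values req->length = 2048, req->ring_index = 100 and a
 * hardware out-pointer of 40, the recomputed free count is
 *
 *     req->cnt = 2048 - (100 - 40) = 1988;
 *
 * and the "+ 2" headroom keeps the producer from catching the consumer,
 * which would make the full and empty states indistinguishable.
 */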
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct req_que *req)
{
    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }
    return (cont_entry_t *)req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
    uint32_t h;
    int index;
    uint8_t found = 0;
    struct req_que *req = qpair->req;

    h = req->current_outstanding_cmd;

    for (index = 1; index < req->num_outstanding_cmds; index++) {
        h++;
        if (h == req->num_outstanding_cmds)
            h = 1;

        if (h == QLA_TGT_SKIP_HANDLE)
            continue;

        if (!req->outstanding_cmds[h]) {
            found = 1;
            break;
        }
    }

    if (found) {
        req->current_outstanding_cmd = h;
    } else {
        ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
            "qla_target(%d): Ran out of empty cmd slots\n",
            qpair->vha->vp_idx);
        h = QLA_TGT_NULL_HANDLE;
    }

    return h;
}
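/*
 * Illustrative note (not part of the driver): the search starts one
 * past current_outstanding_cmd and wraps around the outstanding_cmds
 * table, never handing out slot 0 (QLA_TGT_NULL_HANDLE signals "no
 * slot") and skipping QLA_TGT_SKIP_HANDLE, so any handle it returns
 * can be round-tripped through the CTIO completion path to recover
 * the owning command.
 */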
  2226. /* ha->hardware_lock supposed to be held on entry */
  2227. static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
  2228. struct qla_tgt_prm *prm)
  2229. {
  2230. uint32_t h;
  2231. struct ctio7_to_24xx *pkt;
  2232. struct atio_from_isp *atio = &prm->cmd->atio;
  2233. uint16_t temp;
  2234. struct qla_tgt_cmd *cmd = prm->cmd;
  2235. pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
  2236. prm->pkt = pkt;
  2237. memset(pkt, 0, sizeof(*pkt));
  2238. pkt->entry_type = CTIO_TYPE7;
  2239. pkt->entry_count = (uint8_t)prm->req_cnt;
  2240. pkt->vp_index = prm->cmd->vp_idx;
  2241. h = qlt_make_handle(qpair);
  2242. if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
  2243. /*
  2244. * CTIO type 7 from the firmware doesn't provide a way to
  2245. * know the initiator's LOOP ID, hence we can't find
2246. * the session and, therefore, the command.
  2247. */
  2248. return -EAGAIN;
  2249. } else
  2250. qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
  2251. pkt->handle = make_handle(qpair->req->id, h);
  2252. pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
  2253. pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
  2254. pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
  2255. pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
  2256. pkt->exchange_addr = atio->u.isp24.exchange_addr;
  2257. temp = atio->u.isp24.attr << 9;
  2258. pkt->u.status0.flags |= cpu_to_le16(temp);
  2259. temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
  2260. pkt->u.status0.ox_id = cpu_to_le16(temp);
  2261. pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
  2262. if (cmd->edif) {
  2263. if (cmd->dma_data_direction == DMA_TO_DEVICE)
  2264. prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
  2265. if (cmd->dma_data_direction == DMA_FROM_DEVICE)
  2266. prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
  2267. pkt->u.status0.edif_flags |= EF_EN_EDIF;
  2268. }
  2269. return 0;
  2270. }
  2271. /*
  2272. * ha->hardware_lock supposed to be held on entry. We have already made sure
2273. * that there is a sufficient number of request entries to not drop it.
  2274. */
  2275. static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
  2276. {
  2277. int cnt;
  2278. struct dsd64 *cur_dsd;
  2279. /* Build continuation packets */
  2280. while (prm->seg_cnt > 0) {
  2281. cont_a64_entry_t *cont_pkt64 =
  2282. (cont_a64_entry_t *)qlt_get_req_pkt(
  2283. prm->cmd->qpair->req);
  2284. /*
2285. * Make sure that none of the 64-bit specific
2286. * fields of cont_pkt64 are used for 32-bit
2287. * addressing. Cast to (cont_entry_t *) for
2288. * that.
  2289. */
  2290. memset(cont_pkt64, 0, sizeof(*cont_pkt64));
  2291. cont_pkt64->entry_count = 1;
  2292. cont_pkt64->sys_define = 0;
  2293. cont_pkt64->entry_type = CONTINUE_A64_TYPE;
  2294. cur_dsd = cont_pkt64->dsd;
  2295. /* Load continuation entry data segments */
  2296. for (cnt = 0;
  2297. cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
  2298. cnt++, prm->seg_cnt--) {
  2299. append_dsd64(&cur_dsd, prm->sg);
  2300. prm->sg = sg_next(prm->sg);
  2301. }
  2302. }
  2303. }
  2304. /*
  2305. * ha->hardware_lock supposed to be held on entry. We have already made sure
2306. * that there is a sufficient number of request entries to not drop it.
  2307. */
  2308. static void qlt_load_data_segments(struct qla_tgt_prm *prm)
  2309. {
  2310. int cnt;
  2311. struct dsd64 *cur_dsd;
  2312. struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
  2313. pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
  2314. /* Setup packet address segment pointer */
  2315. cur_dsd = &pkt24->u.status0.dsd;
  2316. /* Set total data segment count */
  2317. if (prm->seg_cnt)
  2318. pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
  2319. if (prm->seg_cnt == 0) {
  2320. /* No data transfer */
  2321. cur_dsd->address = 0;
  2322. cur_dsd->length = 0;
  2323. return;
  2324. }
  2325. /* If scatter gather */
  2326. /* Load command entry data segments */
  2327. for (cnt = 0;
  2328. (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
  2329. cnt++, prm->seg_cnt--) {
  2330. append_dsd64(&cur_dsd, prm->sg);
  2331. prm->sg = sg_next(prm->sg);
  2332. }
  2333. qlt_load_cont_data_segments(prm);
  2334. }
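/*
 * Annotation (not in the driver): layout example, assuming the usual values
 * QLA_TGT_DATASEGS_PER_CMD_24XX = 1 and QLA_TGT_DATASEGS_PER_CONT_24XX = 5.
 * A 12-segment scatterlist is emitted as one dsd64 inside the CTIO7 itself,
 * followed by three CONTINUE_A64_TYPE IOCBs carrying 5, 5 and 1 descriptors
 * respectively (11 remaining segments / 5 per continuation, rounded up).
 */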
  2335. static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
  2336. {
  2337. return cmd->bufflen > 0;
  2338. }
  2339. static void qlt_print_dif_err(struct qla_tgt_prm *prm)
  2340. {
  2341. struct qla_tgt_cmd *cmd;
  2342. struct scsi_qla_host *vha;
  2343. /* asc 0x10=dif error */
  2344. if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
  2345. cmd = prm->cmd;
  2346. vha = cmd->vha;
  2347. /* ASCQ */
  2348. switch (prm->sense_buffer[13]) {
  2349. case 1:
  2350. ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
  2351. "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
  2352. "se_cmd=%p tag[%x]",
  2353. cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
  2354. cmd->atio.u.isp24.exchange_addr);
  2355. break;
  2356. case 2:
  2357. ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
  2358. "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
  2359. "se_cmd=%p tag[%x]",
  2360. cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
  2361. cmd->atio.u.isp24.exchange_addr);
  2362. break;
  2363. case 3:
  2364. ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
  2365. "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
  2366. "se_cmd=%p tag[%x]",
  2367. cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
  2368. cmd->atio.u.isp24.exchange_addr);
  2369. break;
  2370. default:
  2371. ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
  2372. "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
  2373. "se_cmd=%p tag[%x]",
  2374. cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
  2375. cmd->atio.u.isp24.exchange_addr);
  2376. break;
  2377. }
  2378. ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
  2379. }
  2380. }
  2381. /*
  2382. * Called without ha->hardware_lock held
  2383. */
  2384. static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
  2385. struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
  2386. uint32_t *full_req_cnt)
  2387. {
  2388. struct se_cmd *se_cmd = &cmd->se_cmd;
  2389. struct qla_qpair *qpair = cmd->qpair;
  2390. prm->cmd = cmd;
  2391. prm->tgt = cmd->tgt;
  2392. prm->pkt = NULL;
  2393. prm->rq_result = scsi_status;
  2394. prm->sense_buffer = &cmd->sense_buffer[0];
  2395. prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
  2396. prm->sg = NULL;
  2397. prm->seg_cnt = -1;
  2398. prm->req_cnt = 1;
  2399. prm->residual = 0;
  2400. prm->add_status_pkt = 0;
  2401. prm->prot_sg = NULL;
  2402. prm->prot_seg_cnt = 0;
  2403. prm->tot_dsds = 0;
  2404. if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
  2405. if (qlt_pci_map_calc_cnt(prm) != 0)
  2406. return -EAGAIN;
  2407. }
  2408. *full_req_cnt = prm->req_cnt;
  2409. if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
  2410. prm->residual = se_cmd->residual_count;
  2411. ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
  2412. "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
  2413. prm->residual, se_cmd->tag,
  2414. se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
  2415. cmd->bufflen, prm->rq_result);
  2416. prm->rq_result |= SS_RESIDUAL_UNDER;
  2417. } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
  2418. prm->residual = se_cmd->residual_count;
  2419. ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
  2420. "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
  2421. prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
  2422. se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
  2423. prm->rq_result |= SS_RESIDUAL_OVER;
  2424. }
  2425. if (xmit_type & QLA_TGT_XMIT_STATUS) {
  2426. /*
  2427. * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
  2428. * ignored in *xmit_response() below
  2429. */
  2430. if (qlt_has_data(cmd)) {
  2431. if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
  2432. (IS_FWI2_CAPABLE(cmd->vha->hw) &&
  2433. (prm->rq_result != 0))) {
  2434. prm->add_status_pkt = 1;
  2435. (*full_req_cnt)++;
  2436. }
  2437. }
  2438. }
  2439. return 0;
  2440. }
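/*
 * Annotation (not in the driver): full_req_cnt bookkeeping example.  For a
 * READ that carries both data and status (QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS set) with valid sense or a non-zero rq_result, the
 * data CTIO cannot also carry the status, so add_status_pkt is set and
 * *full_req_cnt is bumped by one to reserve an IOCB for the trailing
 * status-only CTIO built later in qlt_xmit_response().
 */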
  2441. static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
  2442. int sending_sense)
  2443. {
  2444. if (cmd->qpair->enable_class_2)
  2445. return 0;
  2446. if (sending_sense)
  2447. return cmd->conf_compl_supported;
  2448. else
  2449. return cmd->qpair->enable_explicit_conf &&
  2450. cmd->conf_compl_supported;
  2451. }
  2452. static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
  2453. struct qla_tgt_prm *prm)
  2454. {
  2455. prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
  2456. (uint32_t)sizeof(ctio->u.status1.sense_data));
  2457. ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
  2458. if (qlt_need_explicit_conf(prm->cmd, 0)) {
  2459. ctio->u.status0.flags |= cpu_to_le16(
  2460. CTIO7_FLAGS_EXPLICIT_CONFORM |
  2461. CTIO7_FLAGS_CONFORM_REQ);
  2462. }
  2463. ctio->u.status0.residual = cpu_to_le32(prm->residual);
  2464. ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
  2465. if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
  2466. int i;
  2467. if (qlt_need_explicit_conf(prm->cmd, 1)) {
  2468. if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
  2469. ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
  2470. "Skipping EXPLICIT_CONFORM and "
  2471. "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
  2472. "non GOOD status\n");
  2473. goto skip_explict_conf;
  2474. }
  2475. ctio->u.status1.flags |= cpu_to_le16(
  2476. CTIO7_FLAGS_EXPLICIT_CONFORM |
  2477. CTIO7_FLAGS_CONFORM_REQ);
  2478. }
  2479. skip_explict_conf:
  2480. ctio->u.status1.flags &=
  2481. ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
  2482. ctio->u.status1.flags |=
  2483. cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
  2484. ctio->u.status1.scsi_status |=
  2485. cpu_to_le16(SS_SENSE_LEN_VALID);
  2486. ctio->u.status1.sense_length =
  2487. cpu_to_le16(prm->sense_buffer_len);
  2488. for (i = 0; i < prm->sense_buffer_len/4; i++) {
  2489. uint32_t v;
  2490. v = get_unaligned_be32(
  2491. &((uint32_t *)prm->sense_buffer)[i]);
  2492. put_unaligned_le32(v,
  2493. &((uint32_t *)ctio->u.status1.sense_data)[i]);
  2494. }
  2495. qlt_print_dif_err(prm);
  2496. } else {
  2497. ctio->u.status1.flags &=
  2498. ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
  2499. ctio->u.status1.flags |=
  2500. cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
  2501. ctio->u.status1.sense_length = 0;
  2502. memset(ctio->u.status1.sense_data, 0,
  2503. sizeof(ctio->u.status1.sense_data));
  2504. }
2505. /* Sense with len > 24: is that possible? */
  2506. }
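/*
 * Annotation (not in the driver): the sense copy above moves one 32-bit
 * word at a time, fetching each word big-endian and storing it
 * little-endian.  E.g. sense bytes 70 00 0b 00 are read as 0x70000b00 and
 * land in sense_data as 00 0b 00 70.
 */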
  2507. static inline int
  2508. qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
  2509. {
  2510. switch (se_cmd->prot_op) {
  2511. case TARGET_PROT_DOUT_INSERT:
  2512. case TARGET_PROT_DIN_STRIP:
  2513. if (ql2xenablehba_err_chk >= 1)
  2514. return 1;
  2515. break;
  2516. case TARGET_PROT_DOUT_PASS:
  2517. case TARGET_PROT_DIN_PASS:
  2518. if (ql2xenablehba_err_chk >= 2)
  2519. return 1;
  2520. break;
  2521. case TARGET_PROT_DIN_INSERT:
  2522. case TARGET_PROT_DOUT_STRIP:
  2523. return 1;
  2524. default:
  2525. break;
  2526. }
  2527. return 0;
  2528. }
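/*
 * Annotation (not in the driver): ql2xenablehba_err_chk acts as a
 * threshold.  Summary of the switch above:
 *
 *	DIN_INSERT / DOUT_STRIP : HBA checking always enabled
 *	DOUT_INSERT / DIN_STRIP : enabled when ql2xenablehba_err_chk >= 1
 *	DIN_PASS / DOUT_PASS    : enabled when ql2xenablehba_err_chk >= 2
 */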
  2529. static inline int
  2530. qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
  2531. {
  2532. switch (se_cmd->prot_op) {
  2533. case TARGET_PROT_DIN_INSERT:
  2534. case TARGET_PROT_DOUT_INSERT:
  2535. case TARGET_PROT_DIN_STRIP:
  2536. case TARGET_PROT_DOUT_STRIP:
  2537. case TARGET_PROT_DIN_PASS:
  2538. case TARGET_PROT_DOUT_PASS:
  2539. return 1;
  2540. default:
  2541. return 0;
  2542. }
  2543. return 0;
  2544. }
  2545. /*
  2546. * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
  2547. */
  2548. static void
  2549. qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
  2550. uint16_t *pfw_prot_opts)
  2551. {
  2552. struct se_cmd *se_cmd = &cmd->se_cmd;
  2553. uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
  2554. scsi_qla_host_t *vha = cmd->tgt->vha;
  2555. struct qla_hw_data *ha = vha->hw;
  2556. uint32_t t32 = 0;
  2557. /*
2558. * Wait until the Mode Sense/Select cmd, mode page 0Ah, subpage 2
2559. * have been implemented by TCM before the AppTag is available.
2560. * Look for modesense_handlers[].
  2561. */
  2562. ctx->app_tag = 0;
  2563. ctx->app_tag_mask[0] = 0x0;
  2564. ctx->app_tag_mask[1] = 0x0;
  2565. if (IS_PI_UNINIT_CAPABLE(ha)) {
  2566. if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
  2567. (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
  2568. *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
  2569. else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
  2570. *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
  2571. }
  2572. t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
  2573. switch (se_cmd->prot_type) {
  2574. case TARGET_DIF_TYPE0_PROT:
  2575. /*
  2576. * No check for ql2xenablehba_err_chk, as it
  2577. * would be an I/O error if hba tag generation
  2578. * is not done.
  2579. */
  2580. ctx->ref_tag = cpu_to_le32(lba);
  2581. /* enable ALL bytes of the ref tag */
  2582. ctx->ref_tag_mask[0] = 0xff;
  2583. ctx->ref_tag_mask[1] = 0xff;
  2584. ctx->ref_tag_mask[2] = 0xff;
  2585. ctx->ref_tag_mask[3] = 0xff;
  2586. break;
  2587. case TARGET_DIF_TYPE1_PROT:
  2588. /*
  2589. * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
  2590. * REF tag, and 16 bit app tag.
  2591. */
  2592. ctx->ref_tag = cpu_to_le32(lba);
  2593. if (!qla_tgt_ref_mask_check(se_cmd) ||
  2594. !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
  2595. *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
  2596. break;
  2597. }
  2598. /* enable ALL bytes of the ref tag */
  2599. ctx->ref_tag_mask[0] = 0xff;
  2600. ctx->ref_tag_mask[1] = 0xff;
  2601. ctx->ref_tag_mask[2] = 0xff;
  2602. ctx->ref_tag_mask[3] = 0xff;
  2603. break;
  2604. case TARGET_DIF_TYPE2_PROT:
  2605. /*
  2606. * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
  2607. * tag has to match LBA in CDB + N
  2608. */
  2609. ctx->ref_tag = cpu_to_le32(lba);
  2610. if (!qla_tgt_ref_mask_check(se_cmd) ||
  2611. !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
  2612. *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
  2613. break;
  2614. }
  2615. /* enable ALL bytes of the ref tag */
  2616. ctx->ref_tag_mask[0] = 0xff;
  2617. ctx->ref_tag_mask[1] = 0xff;
  2618. ctx->ref_tag_mask[2] = 0xff;
  2619. ctx->ref_tag_mask[3] = 0xff;
  2620. break;
  2621. case TARGET_DIF_TYPE3_PROT:
  2622. /* For TYPE 3 protection: 16 bit GUARD only */
  2623. *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
  2624. ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
  2625. ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
  2626. break;
  2627. }
  2628. }
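/*
 * Annotation (not in the driver): a worked Type 1 example.  For
 * t_task_lba = 0x100001234 only the low 32 bits seed the tag, so
 * ctx->ref_tag = cpu_to_le32(0x00001234); all four ref_tag_mask bytes are
 * 0xff, meaning every byte of the 32-bit reference tag is compared, while
 * the app tag stays unchecked because app_tag_mask[] is left zeroed.
 */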
  2629. static inline int
  2630. qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
  2631. {
  2632. struct dsd64 *cur_dsd;
  2633. uint32_t transfer_length = 0;
  2634. uint32_t data_bytes;
  2635. uint32_t dif_bytes;
  2636. uint8_t bundling = 1;
  2637. struct crc_context *crc_ctx_pkt = NULL;
  2638. struct qla_hw_data *ha;
  2639. struct ctio_crc2_to_fw *pkt;
  2640. dma_addr_t crc_ctx_dma;
  2641. uint16_t fw_prot_opts = 0;
  2642. struct qla_tgt_cmd *cmd = prm->cmd;
  2643. struct se_cmd *se_cmd = &cmd->se_cmd;
  2644. uint32_t h;
  2645. struct atio_from_isp *atio = &prm->cmd->atio;
  2646. struct qla_tc_param tc;
  2647. uint16_t t16;
  2648. scsi_qla_host_t *vha = cmd->vha;
  2649. ha = vha->hw;
  2650. pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
  2651. prm->pkt = pkt;
  2652. memset(pkt, 0, sizeof(*pkt));
  2653. ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
  2654. "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
  2655. cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
  2656. prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
  2657. if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
  2658. (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
  2659. bundling = 0;
2660. /* Compute DIF length and adjust data length to include protection */
  2661. data_bytes = cmd->bufflen;
  2662. dif_bytes = (data_bytes / cmd->blk_sz) * 8;
  2663. switch (se_cmd->prot_op) {
  2664. case TARGET_PROT_DIN_INSERT:
  2665. case TARGET_PROT_DOUT_STRIP:
  2666. transfer_length = data_bytes;
  2667. if (cmd->prot_sg_cnt)
  2668. data_bytes += dif_bytes;
  2669. break;
  2670. case TARGET_PROT_DIN_STRIP:
  2671. case TARGET_PROT_DOUT_INSERT:
  2672. case TARGET_PROT_DIN_PASS:
  2673. case TARGET_PROT_DOUT_PASS:
  2674. transfer_length = data_bytes + dif_bytes;
  2675. break;
  2676. default:
  2677. BUG();
  2678. break;
  2679. }
  2680. if (!qlt_hba_err_chk_enabled(se_cmd))
  2681. fw_prot_opts |= 0x10; /* Disable Guard tag checking */
  2682. /* HBA error checking enabled */
  2683. else if (IS_PI_UNINIT_CAPABLE(ha)) {
  2684. if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
  2685. (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
  2686. fw_prot_opts |= PO_DIS_VALD_APP_ESC;
  2687. else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
  2688. fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
  2689. }
  2690. switch (se_cmd->prot_op) {
  2691. case TARGET_PROT_DIN_INSERT:
  2692. case TARGET_PROT_DOUT_INSERT:
  2693. fw_prot_opts |= PO_MODE_DIF_INSERT;
  2694. break;
  2695. case TARGET_PROT_DIN_STRIP:
  2696. case TARGET_PROT_DOUT_STRIP:
  2697. fw_prot_opts |= PO_MODE_DIF_REMOVE;
  2698. break;
  2699. case TARGET_PROT_DIN_PASS:
  2700. case TARGET_PROT_DOUT_PASS:
  2701. fw_prot_opts |= PO_MODE_DIF_PASS;
  2702. /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
  2703. break;
  2704. default:/* Normal Request */
  2705. fw_prot_opts |= PO_MODE_DIF_PASS;
  2706. break;
  2707. }
  2708. /* ---- PKT ---- */
  2709. /* Update entry type to indicate Command Type CRC_2 IOCB */
  2710. pkt->entry_type = CTIO_CRC2;
  2711. pkt->entry_count = 1;
  2712. pkt->vp_index = cmd->vp_idx;
  2713. h = qlt_make_handle(qpair);
  2714. if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
  2715. /*
  2716. * CTIO type 7 from the firmware doesn't provide a way to
  2717. * know the initiator's LOOP ID, hence we can't find
2718. * the session and, therefore, the command.
  2719. */
  2720. return -EAGAIN;
  2721. } else
  2722. qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
  2723. pkt->handle = make_handle(qpair->req->id, h);
  2724. pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
  2725. pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
  2726. pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
  2727. pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
  2728. pkt->exchange_addr = atio->u.isp24.exchange_addr;
  2729. /* silence compile warning */
  2730. t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
  2731. pkt->ox_id = cpu_to_le16(t16);
  2732. t16 = (atio->u.isp24.attr << 9);
  2733. pkt->flags |= cpu_to_le16(t16);
  2734. pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
  2735. /* Set transfer direction */
  2736. if (cmd->dma_data_direction == DMA_TO_DEVICE)
  2737. pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
  2738. else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
  2739. pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
  2740. pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
  2741. /* Fibre channel byte count */
  2742. pkt->transfer_length = cpu_to_le32(transfer_length);
  2743. /* ----- CRC context -------- */
  2744. /* Allocate CRC context from global pool */
  2745. crc_ctx_pkt = cmd->ctx =
  2746. dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
  2747. if (!crc_ctx_pkt)
  2748. goto crc_queuing_error;
  2749. crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
  2750. INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
  2751. /* Set handle */
  2752. crc_ctx_pkt->handle = pkt->handle;
  2753. qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
  2754. put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
  2755. pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
  2756. if (!bundling) {
  2757. cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
  2758. } else {
  2759. /*
2760. * Configure bundling if we need to fetch interleaved
2761. * protection data via separate PCI accesses
  2762. */
  2763. fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
  2764. crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
  2765. crc_ctx_pkt->u.bundling.dseg_count =
  2766. cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
  2767. cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
  2768. }
  2769. /* Finish the common fields of CRC pkt */
  2770. crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
  2771. crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
  2772. crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
  2773. crc_ctx_pkt->guard_seed = cpu_to_le16(0);
  2774. memset((uint8_t *)&tc, 0 , sizeof(tc));
  2775. tc.vha = vha;
  2776. tc.blk_sz = cmd->blk_sz;
  2777. tc.bufflen = cmd->bufflen;
  2778. tc.sg = cmd->sg;
  2779. tc.prot_sg = cmd->prot_sg;
  2780. tc.ctx = crc_ctx_pkt;
  2781. tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
  2782. /* Walks data segments */
  2783. pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
  2784. if (!bundling && prm->prot_seg_cnt) {
  2785. if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
  2786. prm->tot_dsds, &tc))
  2787. goto crc_queuing_error;
  2788. } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
  2789. (prm->tot_dsds - prm->prot_seg_cnt), &tc))
  2790. goto crc_queuing_error;
  2791. if (bundling && prm->prot_seg_cnt) {
  2792. /* Walks dif segments */
  2793. pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
  2794. cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
  2795. if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
  2796. prm->prot_seg_cnt, cmd))
  2797. goto crc_queuing_error;
  2798. }
  2799. return QLA_SUCCESS;
  2800. crc_queuing_error:
  2801. /* Cleanup will be performed by the caller */
  2802. qpair->req->outstanding_cmds[h] = NULL;
  2803. return QLA_FUNCTION_FAILED;
  2804. }
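/*
 * Annotation (not in the driver): DIF sizing example for the math above.
 * With cmd->bufflen = 32768 and cmd->blk_sz = 512 there are 64 blocks, so
 * dif_bytes = 64 * 8 = 512 (one 8-byte tuple per block).  For
 * DIN_STRIP / DOUT_INSERT / PASS the wire-side transfer_length is
 * data_bytes + dif_bytes = 33280, while for DIN_INSERT / DOUT_STRIP the
 * wire carries only data_bytes = 32768.
 */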
  2805. /*
2806. * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
2807. * QLA_TGT_XMIT_STATUS for >= 24xx silicon
  2808. */
  2809. int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
  2810. uint8_t scsi_status)
  2811. {
  2812. struct scsi_qla_host *vha = cmd->vha;
  2813. struct qla_qpair *qpair = cmd->qpair;
  2814. struct ctio7_to_24xx *pkt;
  2815. struct qla_tgt_prm prm;
  2816. uint32_t full_req_cnt = 0;
  2817. unsigned long flags = 0;
  2818. int res;
  2819. if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
  2820. (cmd->sess && cmd->sess->deleted)) {
  2821. cmd->state = QLA_TGT_STATE_PROCESSED;
  2822. return 0;
  2823. }
  2824. ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
  2825. "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
  2826. (xmit_type & QLA_TGT_XMIT_STATUS) ?
  2827. 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
  2828. &cmd->se_cmd, qpair->id);
  2829. res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
  2830. &full_req_cnt);
  2831. if (unlikely(res != 0)) {
  2832. return res;
  2833. }
  2834. spin_lock_irqsave(qpair->qp_lock_ptr, flags);
  2835. if (xmit_type == QLA_TGT_XMIT_STATUS)
  2836. qpair->tgt_counters.core_qla_snd_status++;
  2837. else
  2838. qpair->tgt_counters.core_qla_que_buf++;
  2839. if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
  2840. /*
  2841. * Either the port is not online or this request was from
  2842. * previous life, just abort the processing.
  2843. */
  2844. cmd->state = QLA_TGT_STATE_PROCESSED;
  2845. ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
  2846. "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
  2847. vha->flags.online, qla2x00_reset_active(vha),
  2848. cmd->reset_count, qpair->chip_reset);
  2849. res = 0;
  2850. goto out_unmap_unlock;
  2851. }
2852. /* Does F/W have enough IOCBs for this request? */
  2853. res = qlt_check_reserve_free_req(qpair, full_req_cnt);
  2854. if (unlikely(res))
  2855. goto out_unmap_unlock;
  2856. if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
  2857. res = qlt_build_ctio_crc2_pkt(qpair, &prm);
  2858. else
  2859. res = qlt_24xx_build_ctio_pkt(qpair, &prm);
  2860. if (unlikely(res != 0)) {
  2861. qpair->req->cnt += full_req_cnt;
  2862. goto out_unmap_unlock;
  2863. }
  2864. pkt = (struct ctio7_to_24xx *)prm.pkt;
  2865. if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
  2866. pkt->u.status0.flags |=
  2867. cpu_to_le16(CTIO7_FLAGS_DATA_IN |
  2868. CTIO7_FLAGS_STATUS_MODE_0);
  2869. if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
  2870. qlt_load_data_segments(&prm);
  2871. if (prm.add_status_pkt == 0) {
  2872. if (xmit_type & QLA_TGT_XMIT_STATUS) {
  2873. pkt->u.status0.scsi_status =
  2874. cpu_to_le16(prm.rq_result);
  2875. if (!cmd->edif)
  2876. pkt->u.status0.residual =
  2877. cpu_to_le32(prm.residual);
  2878. pkt->u.status0.flags |= cpu_to_le16(
  2879. CTIO7_FLAGS_SEND_STATUS);
  2880. if (qlt_need_explicit_conf(cmd, 0)) {
  2881. pkt->u.status0.flags |=
  2882. cpu_to_le16(
  2883. CTIO7_FLAGS_EXPLICIT_CONFORM |
  2884. CTIO7_FLAGS_CONFORM_REQ);
  2885. }
  2886. }
  2887. } else {
  2888. /*
2889. * We have already made sure that there is a sufficient
2890. * number of request entries to not drop the HW lock in
  2891. * req_pkt().
  2892. */
  2893. struct ctio7_to_24xx *ctio =
  2894. (struct ctio7_to_24xx *)qlt_get_req_pkt(
  2895. qpair->req);
  2896. ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
  2897. "Building additional status packet 0x%p.\n",
  2898. ctio);
  2899. /*
2900. * T10-DIF: ctio_crc2_to_fw overlays on top of
  2901. * ctio7_to_24xx
  2902. */
  2903. memcpy(ctio, pkt, sizeof(*ctio));
  2904. /* reset back to CTIO7 */
  2905. ctio->entry_count = 1;
  2906. ctio->entry_type = CTIO_TYPE7;
  2907. ctio->dseg_count = 0;
  2908. ctio->u.status1.flags &= ~cpu_to_le16(
  2909. CTIO7_FLAGS_DATA_IN);
  2910. /* Real finish is ctio_m1's finish */
  2911. pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
  2912. pkt->u.status0.flags |= cpu_to_le16(
  2913. CTIO7_FLAGS_DONT_RET_CTIO);
2914. /* qlt_24xx_init_ctio_to_isp will correct
2915. * all necessary fields that are part of CTIO7.
2916. * There should be no residual CTIO-CRC2 data.
  2917. */
  2918. qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
  2919. &prm);
  2920. }
  2921. } else
  2922. qlt_24xx_init_ctio_to_isp(pkt, &prm);
  2923. cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
  2924. cmd->cmd_sent_to_fw = 1;
  2925. cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
  2926. /* Memory Barrier */
  2927. wmb();
  2928. if (qpair->reqq_start_iocbs)
  2929. qpair->reqq_start_iocbs(qpair);
  2930. else
  2931. qla2x00_start_iocbs(vha, qpair->req);
  2932. spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
  2933. return 0;
  2934. out_unmap_unlock:
  2935. qlt_unmap_sg(vha, cmd);
  2936. spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
  2937. return res;
  2938. }
  2939. EXPORT_SYMBOL(qlt_xmit_response);
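/*
 * Usage sketch (annotation, not part of the driver): a fabric module such
 * as tcm_qla2xxx typically completes a READ by handing both data and status
 * to qlt_xmit_response().  A minimal, hypothetical caller under those
 * assumptions:
 */
#if 0
static int example_queue_data_in(struct qla_tgt_cmd *cmd,
	struct se_cmd *se_cmd)
{
	cmd->bufflen = se_cmd->data_length;	/* payload to send */
	cmd->sg = se_cmd->t_data_sg;		/* data scatterlist */
	cmd->sg_cnt = se_cmd->t_data_nents;
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA | QLA_TGT_XMIT_STATUS,
				 se_cmd->scsi_status);
}
#endif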
  2940. int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
  2941. {
  2942. struct ctio7_to_24xx *pkt;
  2943. struct scsi_qla_host *vha = cmd->vha;
  2944. struct qla_tgt *tgt = cmd->tgt;
  2945. struct qla_tgt_prm prm;
  2946. unsigned long flags = 0;
  2947. int res = 0;
  2948. struct qla_qpair *qpair = cmd->qpair;
  2949. memset(&prm, 0, sizeof(prm));
  2950. prm.cmd = cmd;
  2951. prm.tgt = tgt;
  2952. prm.sg = NULL;
  2953. prm.req_cnt = 1;
  2954. if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
  2955. (cmd->sess && cmd->sess->deleted)) {
  2956. /*
  2957. * Either the port is not online or this request was from
  2958. * previous life, just abort the processing.
  2959. */
  2960. cmd->aborted = 1;
  2961. cmd->write_data_transferred = 0;
  2962. cmd->state = QLA_TGT_STATE_DATA_IN;
  2963. vha->hw->tgt.tgt_ops->handle_data(cmd);
  2964. ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
  2965. "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
  2966. vha->flags.online, qla2x00_reset_active(vha),
  2967. cmd->reset_count, qpair->chip_reset);
  2968. return 0;
  2969. }
  2970. /* Calculate number of entries and segments required */
  2971. if (qlt_pci_map_calc_cnt(&prm) != 0)
  2972. return -EAGAIN;
  2973. spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2974. /* Does F/W have enough IOCBs for this request? */
  2975. res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
  2976. if (res != 0)
  2977. goto out_unlock_free_unmap;
  2978. if (cmd->se_cmd.prot_op)
  2979. res = qlt_build_ctio_crc2_pkt(qpair, &prm);
  2980. else
  2981. res = qlt_24xx_build_ctio_pkt(qpair, &prm);
  2982. if (unlikely(res != 0)) {
  2983. qpair->req->cnt += prm.req_cnt;
  2984. goto out_unlock_free_unmap;
  2985. }
  2986. pkt = (struct ctio7_to_24xx *)prm.pkt;
  2987. pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
  2988. CTIO7_FLAGS_STATUS_MODE_0);
  2989. if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
  2990. qlt_load_data_segments(&prm);
  2991. cmd->state = QLA_TGT_STATE_NEED_DATA;
  2992. cmd->cmd_sent_to_fw = 1;
  2993. cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
  2994. /* Memory Barrier */
  2995. wmb();
  2996. if (qpair->reqq_start_iocbs)
  2997. qpair->reqq_start_iocbs(qpair);
  2998. else
  2999. qla2x00_start_iocbs(vha, qpair->req);
  3000. spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
  3001. return res;
  3002. out_unlock_free_unmap:
  3003. qlt_unmap_sg(vha, cmd);
  3004. spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
  3005. return res;
  3006. }
  3007. EXPORT_SYMBOL(qlt_rdy_to_xfer);
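/*
 * Usage sketch (annotation, not part of the driver): for a WRITE the
 * fabric side requests the data-out phase first and sends status
 * separately later; a hypothetical write_pending() callback reduces to:
 */
#if 0
static int example_write_pending(struct qla_tgt_cmd *cmd)
{
	/* Data arrives asynchronously: the firmware's CTIO completion is
	 * routed through qlt_do_ctio_completion(), which moves the command
	 * to QLA_TGT_STATE_DATA_IN and invokes tgt_ops->handle_data().
	 */
	return qlt_rdy_to_xfer(cmd);
}
#endif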
  3008. /*
  3009. * it is assumed either hardware_lock or qpair lock is held.
  3010. */
  3011. static void
  3012. qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
  3013. struct ctio_crc_from_fw *sts)
  3014. {
  3015. uint8_t *ap = &sts->actual_dif[0];
  3016. uint8_t *ep = &sts->expected_dif[0];
  3017. uint64_t lba = cmd->se_cmd.t_task_lba;
  3018. uint8_t scsi_status, sense_key, asc, ascq;
  3019. unsigned long flags;
  3020. struct scsi_qla_host *vha = cmd->vha;
  3021. cmd->trc_flags |= TRC_DIF_ERR;
  3022. cmd->a_guard = get_unaligned_be16(ap + 0);
  3023. cmd->a_app_tag = get_unaligned_be16(ap + 2);
  3024. cmd->a_ref_tag = get_unaligned_be32(ap + 4);
  3025. cmd->e_guard = get_unaligned_be16(ep + 0);
  3026. cmd->e_app_tag = get_unaligned_be16(ep + 2);
  3027. cmd->e_ref_tag = get_unaligned_be32(ep + 4);
  3028. ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
  3029. "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
  3030. scsi_status = sense_key = asc = ascq = 0;
  3031. /* check appl tag */
  3032. if (cmd->e_app_tag != cmd->a_app_tag) {
  3033. ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
  3034. "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
  3035. cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
  3036. cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
  3037. cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
  3038. cmd->atio.u.isp24.fcp_hdr.ox_id);
  3039. cmd->dif_err_code = DIF_ERR_APP;
  3040. scsi_status = SAM_STAT_CHECK_CONDITION;
  3041. sense_key = ABORTED_COMMAND;
  3042. asc = 0x10;
  3043. ascq = 0x2;
  3044. }
  3045. /* check ref tag */
  3046. if (cmd->e_ref_tag != cmd->a_ref_tag) {
  3047. ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
  3048. "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
  3049. cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
  3050. cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
  3051. cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
  3052. cmd->atio.u.isp24.fcp_hdr.ox_id);
  3053. cmd->dif_err_code = DIF_ERR_REF;
  3054. scsi_status = SAM_STAT_CHECK_CONDITION;
  3055. sense_key = ABORTED_COMMAND;
  3056. asc = 0x10;
  3057. ascq = 0x3;
  3058. goto out;
  3059. }
  3060. /* check guard */
  3061. if (cmd->e_guard != cmd->a_guard) {
  3062. ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
  3063. "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
  3064. cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
  3065. cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
  3066. cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
  3067. cmd->atio.u.isp24.fcp_hdr.ox_id);
  3068. cmd->dif_err_code = DIF_ERR_GRD;
  3069. scsi_status = SAM_STAT_CHECK_CONDITION;
  3070. sense_key = ABORTED_COMMAND;
  3071. asc = 0x10;
  3072. ascq = 0x1;
  3073. }
  3074. out:
  3075. switch (cmd->state) {
  3076. case QLA_TGT_STATE_NEED_DATA:
  3077. /* handle_data will load DIF error code */
  3078. cmd->state = QLA_TGT_STATE_DATA_IN;
  3079. vha->hw->tgt.tgt_ops->handle_data(cmd);
  3080. break;
  3081. default:
  3082. spin_lock_irqsave(&cmd->cmd_lock, flags);
  3083. if (cmd->aborted) {
  3084. spin_unlock_irqrestore(&cmd->cmd_lock, flags);
  3085. vha->hw->tgt.tgt_ops->free_cmd(cmd);
  3086. break;
  3087. }
  3088. spin_unlock_irqrestore(&cmd->cmd_lock, flags);
  3089. qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
  3090. ascq);
3091. /* Assume the SCSI status gets out on the wire;
3092. * do not wait for completion.
  3093. */
  3094. vha->hw->tgt.tgt_ops->free_cmd(cmd);
  3095. break;
  3096. }
  3097. }
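/*
 * Annotation (not in the driver): decode example for the tuples above.
 * Each 8-byte DIF tuple is big-endian: bytes 0-1 guard, 2-3 app tag,
 * 4-7 ref tag.  A guard-only mismatch yields dif_err_code = DIF_ERR_GRD
 * and sense ABORTED COMMAND with asc/ascq 0x10/0x1 ("LOGICAL BLOCK GUARD
 * CHECK FAILED").
 */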
3098. /* If hardware_lock held on entry, might drop it, then reacquire */
  3099. /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
  3100. static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
  3101. struct imm_ntfy_from_isp *ntfy)
  3102. {
  3103. struct nack_to_isp *nack;
  3104. struct qla_hw_data *ha = vha->hw;
  3105. request_t *pkt;
  3106. int ret = 0;
  3107. ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
  3108. "Sending TERM ELS CTIO (ha=%p)\n", ha);
  3109. pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
  3110. if (pkt == NULL) {
  3111. ql_dbg(ql_dbg_tgt, vha, 0xe080,
  3112. "qla_target(%d): %s failed: unable to allocate "
  3113. "request packet\n", vha->vp_idx, __func__);
  3114. return -ENOMEM;
  3115. }
  3116. pkt->entry_type = NOTIFY_ACK_TYPE;
  3117. pkt->entry_count = 1;
  3118. pkt->handle = QLA_TGT_SKIP_HANDLE;
  3119. nack = (struct nack_to_isp *)pkt;
  3120. nack->ox_id = ntfy->ox_id;
  3121. nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
  3122. if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
  3123. nack->u.isp24.flags = ntfy->u.isp24.flags &
  3124. cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
  3125. }
  3126. /* terminate */
  3127. nack->u.isp24.flags |=
  3128. __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
  3129. nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
  3130. nack->u.isp24.status = ntfy->u.isp24.status;
  3131. nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
  3132. nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
  3133. nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
  3134. nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
  3135. nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
  3136. nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
  3137. qla2x00_start_iocbs(vha, vha->req);
  3138. return ret;
  3139. }
  3140. static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
  3141. struct imm_ntfy_from_isp *imm, int ha_locked)
  3142. {
  3143. int rc;
  3144. WARN_ON_ONCE(!ha_locked);
  3145. rc = __qlt_send_term_imm_notif(vha, imm);
  3146. pr_debug("rc = %d\n", rc);
  3147. }
  3148. /*
3149. * If hardware_lock held on entry, might drop it, then reacquire
  3150. * This function sends the appropriate CTIO to ISP 2xxx or 24xx
  3151. */
  3152. static int __qlt_send_term_exchange(struct qla_qpair *qpair,
  3153. struct qla_tgt_cmd *cmd,
  3154. struct atio_from_isp *atio)
  3155. {
  3156. struct scsi_qla_host *vha = qpair->vha;
  3157. struct ctio7_to_24xx *ctio24;
  3158. struct qla_hw_data *ha = vha->hw;
  3159. request_t *pkt;
  3160. int ret = 0;
  3161. uint16_t temp;
  3162. ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
  3163. if (cmd)
  3164. vha = cmd->vha;
  3165. pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
  3166. if (pkt == NULL) {
  3167. ql_dbg(ql_dbg_tgt, vha, 0xe050,
  3168. "qla_target(%d): %s failed: unable to allocate "
  3169. "request packet\n", vha->vp_idx, __func__);
  3170. return -ENOMEM;
  3171. }
  3172. if (cmd != NULL) {
  3173. if (cmd->state < QLA_TGT_STATE_PROCESSED) {
  3174. ql_dbg(ql_dbg_tgt, vha, 0xe051,
  3175. "qla_target(%d): Terminating cmd %p with "
  3176. "incorrect state %d\n", vha->vp_idx, cmd,
  3177. cmd->state);
  3178. } else
  3179. ret = 1;
  3180. }
  3181. qpair->tgt_counters.num_term_xchg_sent++;
  3182. pkt->entry_count = 1;
  3183. pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
  3184. ctio24 = (struct ctio7_to_24xx *)pkt;
  3185. ctio24->entry_type = CTIO_TYPE7;
  3186. ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
  3187. ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
  3188. ctio24->vp_index = vha->vp_idx;
  3189. ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
  3190. ctio24->exchange_addr = atio->u.isp24.exchange_addr;
  3191. temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
  3192. CTIO7_FLAGS_TERMINATE;
  3193. ctio24->u.status1.flags = cpu_to_le16(temp);
  3194. temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
  3195. ctio24->u.status1.ox_id = cpu_to_le16(temp);
  3196. /* Memory Barrier */
  3197. wmb();
  3198. if (qpair->reqq_start_iocbs)
  3199. qpair->reqq_start_iocbs(qpair);
  3200. else
  3201. qla2x00_start_iocbs(vha, qpair->req);
  3202. return ret;
  3203. }
  3204. static void qlt_send_term_exchange(struct qla_qpair *qpair,
  3205. struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
  3206. int ul_abort)
  3207. {
  3208. struct scsi_qla_host *vha;
  3209. unsigned long flags = 0;
  3210. int rc;
3211. /* Why use a different vha? NPIV. */
  3212. if (cmd)
  3213. vha = cmd->vha;
  3214. else
  3215. vha = qpair->vha;
  3216. if (ha_locked) {
  3217. rc = __qlt_send_term_exchange(qpair, cmd, atio);
  3218. if (rc == -ENOMEM)
  3219. qlt_alloc_qfull_cmd(vha, atio, 0, 0);
  3220. goto done;
  3221. }
  3222. spin_lock_irqsave(qpair->qp_lock_ptr, flags);
  3223. rc = __qlt_send_term_exchange(qpair, cmd, atio);
  3224. if (rc == -ENOMEM)
  3225. qlt_alloc_qfull_cmd(vha, atio, 0, 0);
  3226. done:
  3227. if (cmd && !ul_abort && !cmd->aborted) {
  3228. if (cmd->sg_mapped)
  3229. qlt_unmap_sg(vha, cmd);
  3230. vha->hw->tgt.tgt_ops->free_cmd(cmd);
  3231. }
  3232. if (!ha_locked)
  3233. spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
  3234. return;
  3235. }
  3236. static void qlt_init_term_exchange(struct scsi_qla_host *vha)
  3237. {
  3238. struct list_head free_list;
  3239. struct qla_tgt_cmd *cmd, *tcmd;
  3240. vha->hw->tgt.leak_exchg_thresh_hold =
  3241. (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
  3242. cmd = tcmd = NULL;
  3243. if (!list_empty(&vha->hw->tgt.q_full_list)) {
  3244. INIT_LIST_HEAD(&free_list);
  3245. list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
  3246. list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
  3247. list_del(&cmd->cmd_list);
  3248. /* This cmd was never sent to TCM. There is no need
  3249. * to schedule free or call free_cmd
  3250. */
  3251. qlt_free_cmd(cmd);
  3252. vha->hw->tgt.num_qfull_cmds_alloc--;
  3253. }
  3254. }
  3255. vha->hw->tgt.num_qfull_cmds_dropped = 0;
  3256. }
  3257. static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
  3258. {
  3259. uint32_t total_leaked;
  3260. total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
  3261. if (vha->hw->tgt.leak_exchg_thresh_hold &&
  3262. (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
  3263. ql_dbg(ql_dbg_tgt, vha, 0xe079,
  3264. "Chip reset due to exchange starvation: %d/%d.\n",
  3265. total_leaked, vha->hw->cur_fw_xcb_count);
  3266. if (IS_P3P_TYPE(vha->hw))
  3267. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  3268. else
  3269. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  3270. qla2xxx_wake_dpc(vha);
  3271. }
  3272. }
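/*
 * Annotation (not in the driver): threshold arithmetic example, assuming
 * the usual LEAK_EXCHG_THRESH_HOLD_PERCENT of 75.  With
 * cur_fw_xcb_count = 4096, qlt_init_term_exchange() computes
 * (4096 / 100) * 75 = 3000 (integer division), so a chip reset is
 * scheduled once more than 3000 exchanges have been dropped.
 */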
  3273. int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
  3274. {
  3275. struct qla_tgt *tgt = cmd->tgt;
  3276. struct scsi_qla_host *vha = tgt->vha;
  3277. struct se_cmd *se_cmd = &cmd->se_cmd;
  3278. unsigned long flags;
  3279. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
  3280. "qla_target(%d): terminating exchange for aborted cmd=%p "
  3281. "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
  3282. se_cmd->tag);
  3283. spin_lock_irqsave(&cmd->cmd_lock, flags);
  3284. if (cmd->aborted) {
  3285. if (cmd->sg_mapped)
  3286. qlt_unmap_sg(vha, cmd);
  3287. spin_unlock_irqrestore(&cmd->cmd_lock, flags);
  3288. /*
  3289. * It's normal to see 2 calls in this path:
  3290. * 1) XFER Rdy completion + CMD_T_ABORT
  3291. * 2) TCM TMR - drain_state_list
  3292. */
  3293. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
  3294. "multiple abort. %p transport_state %x, t_state %x, "
  3295. "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
  3296. cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
  3297. return -EIO;
  3298. }
  3299. cmd->aborted = 1;
  3300. cmd->trc_flags |= TRC_ABORT;
  3301. spin_unlock_irqrestore(&cmd->cmd_lock, flags);
  3302. qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
  3303. return 0;
  3304. }
  3305. EXPORT_SYMBOL(qlt_abort_cmd);
  3306. void qlt_free_cmd(struct qla_tgt_cmd *cmd)
  3307. {
  3308. struct fc_port *sess = cmd->sess;
  3309. ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
  3310. "%s: se_cmd[%p] ox_id %04x\n",
  3311. __func__, &cmd->se_cmd,
  3312. be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
  3313. BUG_ON(cmd->cmd_in_wq);
  3314. if (!cmd->q_full)
  3315. qlt_decr_num_pend_cmds(cmd->vha);
  3316. BUG_ON(cmd->sg_mapped);
  3317. cmd->jiffies_at_free = get_jiffies_64();
  3318. if (!sess || !sess->se_sess) {
  3319. WARN_ON(1);
  3320. return;
  3321. }
  3323. cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
  3324. }
  3325. EXPORT_SYMBOL(qlt_free_cmd);
  3326. /*
3327. * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
  3328. */
  3329. static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
  3330. struct qla_tgt_cmd *cmd, uint32_t status)
  3331. {
  3332. int term = 0;
  3333. struct scsi_qla_host *vha = qpair->vha;
  3334. if (cmd->se_cmd.prot_op)
  3335. ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
  3336. "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
  3337. "se_cmd=%p tag[%x] op %#x/%s",
  3338. cmd->lba, cmd->lba,
  3339. cmd->num_blks, &cmd->se_cmd,
  3340. cmd->atio.u.isp24.exchange_addr,
  3341. cmd->se_cmd.prot_op,
  3342. prot_op_str(cmd->se_cmd.prot_op));
  3343. if (ctio != NULL) {
  3344. struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
  3345. term = !(c->flags &
  3346. cpu_to_le16(OF_TERM_EXCH));
  3347. } else
  3348. term = 1;
  3349. if (term)
  3350. qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
  3351. return term;
  3352. }
  3353. /* ha->hardware_lock supposed to be held on entry */
  3354. static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
  3355. struct rsp_que *rsp, uint32_t handle, void *ctio)
  3356. {
  3357. void *cmd = NULL;
  3358. struct req_que *req;
  3359. int qid = GET_QID(handle);
  3360. uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
  3361. if (unlikely(h == QLA_TGT_SKIP_HANDLE))
  3362. return NULL;
  3363. if (qid == rsp->req->id) {
  3364. req = rsp->req;
  3365. } else if (vha->hw->req_q_map[qid]) {
  3366. ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
  3367. "qla_target(%d): CTIO completion with different QID %d handle %x\n",
  3368. vha->vp_idx, rsp->id, handle);
  3369. req = vha->hw->req_q_map[qid];
  3370. } else {
  3371. return NULL;
  3372. }
  3373. h &= QLA_CMD_HANDLE_MASK;
  3374. if (h != QLA_TGT_NULL_HANDLE) {
  3375. if (unlikely(h >= req->num_outstanding_cmds)) {
  3376. ql_dbg(ql_dbg_tgt, vha, 0xe052,
  3377. "qla_target(%d): Wrong handle %x received\n",
  3378. vha->vp_idx, handle);
  3379. return NULL;
  3380. }
  3381. cmd = req->outstanding_cmds[h];
  3382. if (unlikely(cmd == NULL)) {
  3383. ql_dbg(ql_dbg_async, vha, 0xe053,
  3384. "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
  3385. vha->vp_idx, handle, req->id, rsp->id);
  3386. return NULL;
  3387. }
  3388. req->outstanding_cmds[h] = NULL;
  3389. } else if (ctio != NULL) {
  3390. /* We can't get loop ID from CTIO7 */
  3391. ql_dbg(ql_dbg_tgt, vha, 0xe054,
  3392. "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
  3393. "support NULL handles\n", vha->vp_idx);
  3394. return NULL;
  3395. }
  3396. return cmd;
  3397. }
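/*
 * Annotation (not in the driver): handle round trip.  On submission,
 * make_handle(req->id, h) packs the request-queue id alongside the slot
 * index and CTIO_COMPLETION_HANDLE_MARK is OR-ed in.  On completion,
 * GET_QID(handle) recovers the queue so the right outstanding_cmds[] array
 * is consulted, and masking with QLA_CMD_HANDLE_MASK recovers the slot h.
 */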
  3398. /*
3399. * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
  3400. */
  3401. static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
  3402. struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
  3403. {
  3404. struct qla_hw_data *ha = vha->hw;
  3405. struct se_cmd *se_cmd;
  3406. struct qla_tgt_cmd *cmd;
  3407. struct qla_qpair *qpair = rsp->qpair;
  3408. if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
  3409. /* That could happen only in case of an error/reset/abort */
  3410. if (status != CTIO_SUCCESS) {
  3411. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
  3412. "Intermediate CTIO received"
  3413. " (status %x)\n", status);
  3414. }
  3415. return;
  3416. }
  3417. cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
  3418. if (cmd == NULL)
  3419. return;
  3420. if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
  3421. cmd->sess) {
  3422. qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
  3423. (struct ctio7_from_24xx *)ctio);
  3424. }
  3425. se_cmd = &cmd->se_cmd;
  3426. cmd->cmd_sent_to_fw = 0;
  3427. qlt_unmap_sg(vha, cmd);
  3428. if (unlikely(status != CTIO_SUCCESS)) {
  3429. switch (status & 0xFFFF) {
  3430. case CTIO_INVALID_RX_ID:
  3431. if (printk_ratelimit())
  3432. dev_info(&vha->hw->pdev->dev,
  3433. "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
  3434. vha->vp_idx, cmd->atio.u.isp24.attr,
  3435. ((cmd->ctio_flags >> 9) & 0xf),
  3436. cmd->ctio_flags);
  3437. break;
  3438. case CTIO_LIP_RESET:
  3439. case CTIO_TARGET_RESET:
  3440. case CTIO_ABORTED:
3441. /* driver requested abort via Terminate exchange */
  3442. case CTIO_TIMEOUT:
  3443. /* They are OK */
  3444. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
  3445. "qla_target(%d): CTIO with "
  3446. "status %#x received, state %x, se_cmd %p, "
  3447. "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
  3448. "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
  3449. status, cmd->state, se_cmd);
  3450. break;
  3451. case CTIO_PORT_LOGGED_OUT:
  3452. case CTIO_PORT_UNAVAILABLE:
  3453. {
  3454. int logged_out =
  3455. (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
  3456. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
  3457. "qla_target(%d): CTIO with %s status %x "
  3458. "received (state %x, se_cmd %p)\n", vha->vp_idx,
  3459. logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
  3460. status, cmd->state, se_cmd);
  3461. if (logged_out && cmd->sess) {
  3462. /*
  3463. * Session is already logged out, but we need
  3464. * to notify initiator, who's not aware of this
  3465. */
  3466. cmd->sess->send_els_logo = 1;
  3467. ql_dbg(ql_dbg_disc, vha, 0x20f8,
  3468. "%s %d %8phC post del sess\n",
  3469. __func__, __LINE__, cmd->sess->port_name);
  3470. qlt_schedule_sess_for_deletion(cmd->sess);
  3471. }
  3472. break;
  3473. }
  3474. case CTIO_DIF_ERROR: {
  3475. struct ctio_crc_from_fw *crc =
  3476. (struct ctio_crc_from_fw *)ctio;
  3477. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
  3478. "qla_target(%d): CTIO with DIF_ERROR status %x "
  3479. "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
  3480. "expect_dif[0x%llx]\n",
  3481. vha->vp_idx, status, cmd->state, se_cmd,
  3482. *((u64 *)&crc->actual_dif[0]),
  3483. *((u64 *)&crc->expected_dif[0]));
  3484. qlt_handle_dif_error(qpair, cmd, ctio);
  3485. return;
  3486. }
  3487. case CTIO_FAST_AUTH_ERR:
  3488. case CTIO_FAST_INCOMP_PAD_LEN:
  3489. case CTIO_FAST_INVALID_REQ:
  3490. case CTIO_FAST_SPI_ERR:
  3491. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
  3492. "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
  3493. vha->vp_idx, status, cmd->state, se_cmd);
  3494. break;
  3495. default:
  3496. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
  3497. "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
  3498. vha->vp_idx, status, cmd->state, se_cmd);
  3499. break;
  3500. }
  3501. /* "cmd->aborted" means
3502. * cmd is already aborted/terminated, so we don't
3503. * need to terminate again. The exchange is already
3504. * cleaned up/freed at FW level. Just clean up at the driver
  3505. * level.
  3506. */
  3507. if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
  3508. (!cmd->aborted)) {
  3509. cmd->trc_flags |= TRC_CTIO_ERR;
  3510. if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
  3511. return;
  3512. }
  3513. }
  3514. if (cmd->state == QLA_TGT_STATE_PROCESSED) {
  3515. cmd->trc_flags |= TRC_CTIO_DONE;
  3516. } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
  3517. cmd->state = QLA_TGT_STATE_DATA_IN;
  3518. if (status == CTIO_SUCCESS)
  3519. cmd->write_data_transferred = 1;
  3520. ha->tgt.tgt_ops->handle_data(cmd);
  3521. return;
  3522. } else if (cmd->aborted) {
  3523. cmd->trc_flags |= TRC_CTIO_ABORTED;
  3524. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
  3525. "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
  3526. } else {
  3527. cmd->trc_flags |= TRC_CTIO_STRANGE;
  3528. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
  3529. "qla_target(%d): A command in state (%d) should "
  3530. "not return a CTIO complete\n", vha->vp_idx, cmd->state);
  3531. }
  3532. if (unlikely(status != CTIO_SUCCESS) &&
  3533. !cmd->aborted) {
  3534. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
  3535. dump_stack();
  3536. }
  3537. ha->tgt.tgt_ops->free_cmd(cmd);
  3538. }
  3539. static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
  3540. uint8_t task_codes)
  3541. {
  3542. int fcp_task_attr;
  3543. switch (task_codes) {
  3544. case ATIO_SIMPLE_QUEUE:
  3545. fcp_task_attr = TCM_SIMPLE_TAG;
  3546. break;
  3547. case ATIO_HEAD_OF_QUEUE:
  3548. fcp_task_attr = TCM_HEAD_TAG;
  3549. break;
  3550. case ATIO_ORDERED_QUEUE:
  3551. fcp_task_attr = TCM_ORDERED_TAG;
  3552. break;
  3553. case ATIO_ACA_QUEUE:
  3554. fcp_task_attr = TCM_ACA_TAG;
  3555. break;
  3556. case ATIO_UNTAGGED:
  3557. fcp_task_attr = TCM_SIMPLE_TAG;
  3558. break;
  3559. default:
  3560. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
  3561. "qla_target: unknown task code %x, use ORDERED instead\n",
  3562. task_codes);
  3563. fcp_task_attr = TCM_ORDERED_TAG;
  3564. break;
  3565. }
  3566. return fcp_task_attr;
  3567. }
  3568. /*
  3569. * Process context for I/O path into tcm_qla2xxx code
  3570. */
  3571. static void __qlt_do_work(struct qla_tgt_cmd *cmd)
  3572. {
  3573. scsi_qla_host_t *vha = cmd->vha;
  3574. struct qla_hw_data *ha = vha->hw;
  3575. struct fc_port *sess = cmd->sess;
  3576. struct atio_from_isp *atio = &cmd->atio;
  3577. unsigned char *cdb;
  3578. unsigned long flags;
  3579. uint32_t data_length;
  3580. int ret, fcp_task_attr, data_dir, bidi = 0;
  3581. struct qla_qpair *qpair = cmd->qpair;
  3582. cmd->cmd_in_wq = 0;
  3583. cmd->trc_flags |= TRC_DO_WORK;
  3584. if (cmd->aborted) {
  3585. ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
  3586. "cmd with tag %u is aborted\n",
  3587. cmd->atio.u.isp24.exchange_addr);
  3588. goto out_term;
  3589. }
  3590. spin_lock_init(&cmd->cmd_lock);
  3591. cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
  3592. cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
  3593. if (atio->u.isp24.fcp_cmnd.rddata &&
  3594. atio->u.isp24.fcp_cmnd.wrdata) {
  3595. bidi = 1;
  3596. data_dir = DMA_TO_DEVICE;
  3597. } else if (atio->u.isp24.fcp_cmnd.rddata)
  3598. data_dir = DMA_FROM_DEVICE;
  3599. else if (atio->u.isp24.fcp_cmnd.wrdata)
  3600. data_dir = DMA_TO_DEVICE;
  3601. else
  3602. data_dir = DMA_NONE;
  3603. fcp_task_attr = qlt_get_fcp_task_attr(vha,
  3604. atio->u.isp24.fcp_cmnd.task_attr);
  3605. data_length = get_datalen_for_atio(atio);
  3606. ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
  3607. fcp_task_attr, data_dir, bidi);
  3608. if (ret != 0)
  3609. goto out_term;
  3610. /*
  3611. * Drop extra session reference from qlt_handle_cmd_for_atio().
  3612. */
  3613. ha->tgt.tgt_ops->put_sess(sess);
  3614. return;
  3615. out_term:
  3616. ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
  3617. /*
  3618. * cmd has not sent to target yet, so pass NULL as the second
  3619. * argument to qlt_send_term_exchange() and free the memory here.
  3620. */
  3621. cmd->trc_flags |= TRC_DO_WORK_ERR;
  3622. spin_lock_irqsave(qpair->qp_lock_ptr, flags);
  3623. qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
  3624. qlt_decr_num_pend_cmds(vha);
  3625. cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
  3626. spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
  3627. ha->tgt.tgt_ops->put_sess(sess);
  3628. }
  3629. static void qlt_do_work(struct work_struct *work)
  3630. {
  3631. struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
  3632. scsi_qla_host_t *vha = cmd->vha;
  3633. unsigned long flags;
  3634. spin_lock_irqsave(&vha->cmd_list_lock, flags);
  3635. list_del(&cmd->cmd_list);
  3636. spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
  3637. __qlt_do_work(cmd);
  3638. }
  3639. void qlt_clr_qp_table(struct scsi_qla_host *vha)
  3640. {
  3641. unsigned long flags;
  3642. struct qla_hw_data *ha = vha->hw;
  3643. struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
  3644. void *node;
  3645. u64 key = 0;
  3646. ql_log(ql_log_info, vha, 0x706c,
  3647. "User update Number of Active Qpairs %d\n",
  3648. ha->tgt.num_act_qpairs);
  3649. spin_lock_irqsave(&ha->tgt.atio_lock, flags);
  3650. btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
  3651. btree_remove64(&tgt->lun_qpair_map, key);
  3652. ha->base_qpair->lun_cnt = 0;
  3653. for (key = 0; key < ha->max_qpairs; key++)
  3654. if (ha->queue_pair_map[key])
  3655. ha->queue_pair_map[key]->lun_cnt = 0;
  3656. spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
  3657. }
  3658. static void qlt_assign_qpair(struct scsi_qla_host *vha,
  3659. struct qla_tgt_cmd *cmd)
  3660. {
  3661. struct qla_qpair *qpair, *qp;
  3662. struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
  3663. struct qla_qpair_hint *h;
  3664. if (vha->flags.qpairs_available) {
  3665. h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
  3666. if (unlikely(!h)) {
3667. /* spread the lun-to-qpair ratio evenly */
  3668. int lcnt = 0, rc;
  3669. struct scsi_qla_host *base_vha =
  3670. pci_get_drvdata(vha->hw->pdev);
  3671. qpair = vha->hw->base_qpair;
  3672. if (qpair->lun_cnt == 0) {
  3673. qpair->lun_cnt++;
  3674. h = qla_qpair_to_hint(tgt, qpair);
  3675. BUG_ON(!h);
  3676. rc = btree_insert64(&tgt->lun_qpair_map,
  3677. cmd->unpacked_lun, h, GFP_ATOMIC);
  3678. if (rc) {
  3679. qpair->lun_cnt--;
  3680. ql_log(ql_log_info, vha, 0xd037,
  3681. "Unable to insert lun %llx into lun_qpair_map\n",
  3682. cmd->unpacked_lun);
  3683. }
  3684. goto out;
  3685. } else {
  3686. lcnt = qpair->lun_cnt;
  3687. }
  3688. h = NULL;
  3689. list_for_each_entry(qp, &base_vha->qp_list,
  3690. qp_list_elem) {
  3691. if (qp->lun_cnt == 0) {
  3692. qp->lun_cnt++;
  3693. h = qla_qpair_to_hint(tgt, qp);
  3694. BUG_ON(!h);
  3695. rc = btree_insert64(&tgt->lun_qpair_map,
  3696. cmd->unpacked_lun, h, GFP_ATOMIC);
  3697. if (rc) {
  3698. qp->lun_cnt--;
  3699. ql_log(ql_log_info, vha, 0xd038,
  3700. "Unable to insert lun %llx into lun_qpair_map\n",
  3701. cmd->unpacked_lun);
  3702. }
  3703. qpair = qp;
  3704. goto out;
  3705. } else {
  3706. if (qp->lun_cnt < lcnt) {
  3707. lcnt = qp->lun_cnt;
  3708. qpair = qp;
  3709. continue;
  3710. }
  3711. }
  3712. }
  3713. BUG_ON(!qpair);
  3714. qpair->lun_cnt++;
  3715. h = qla_qpair_to_hint(tgt, qpair);
  3716. BUG_ON(!h);
  3717. rc = btree_insert64(&tgt->lun_qpair_map,
  3718. cmd->unpacked_lun, h, GFP_ATOMIC);
  3719. if (rc) {
  3720. qpair->lun_cnt--;
  3721. ql_log(ql_log_info, vha, 0xd039,
  3722. "Unable to insert lun %llx into lun_qpair_map\n",
  3723. cmd->unpacked_lun);
  3724. }
  3725. }
  3726. } else {
  3727. h = &tgt->qphints[0];
  3728. }
  3729. out:
  3730. cmd->qpair = h->qpair;
  3731. cmd->se_cmd.cpuid = h->cpuid;
  3732. }
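/*
 * Annotation (not in the driver): load-spreading example.  On the first
 * command for LUN 5, with base_qpair->lun_cnt already 1 and two extra
 * qpairs still at lun_cnt 0, the list walk picks the first qpair with
 * lun_cnt == 0, bumps it and inserts the (LUN 5 -> hint) mapping into
 * lun_qpair_map; every later command for LUN 5 then resolves through
 * btree_lookup64() without rebalancing.
 */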
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
	struct fc_port *sess,
	struct atio_from_isp *atio)
{
	struct qla_tgt_cmd *cmd;

	cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
	if (!cmd)
		return NULL;

	cmd->cmd_type = TYPE_TGT_CMD;
	memcpy(&cmd->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&cmd->sess_cmd_list);
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = vha->vha_tgt.qla_tgt;
	qlt_incr_num_pend_cmds(vha);
	cmd->vha = vha;
	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cmd->trc_flags = 0;
	cmd->jiffies_at_alloc = get_jiffies_64();

	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
	qlt_assign_qpair(vha, cmd);
	cmd->reset_count = vha->hw->base_qpair->chip_reset;
	cmd->vp_idx = vha->vp_idx;
	cmd->edif = sess->edif.enable;

	return cmd;
}
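/*
 * Entry point for a new FCP command: validate the session, take a session
 * kref, allocate a qla_tgt_cmd and hand it to the qla_tgt_wq workqueue,
 * pinned to the hint CPU when qpairs or MSI-X steering allow it.
 */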
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;
	port_id_t id;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x3061,
		    "New command while device %p is shutting down\n", tgt);
		return -ENODEV;
	}

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
	if (IS_SW_RESV_ADDR(id))
		return -EBUSY;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
	if (unlikely(!sess))
		return -EFAULT;

	/* Another WWN used to have our s_id. Our PLOGI scheduled its
	 * session deletion, but it's still in sess_del_work wq */
	if (sess->deleted) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "New command while old session %p is being deleted\n",
		    sess);
		return -EFAULT;
	}

	/*
	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
	 */
	if (!kref_get_unless_zero(&sess->sess_kref)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
		    "%s: kref_get fail, %8phC oxid %x\n",
		    __func__, sess->port_name,
		    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
		return -EFAULT;
	}

	cmd = qlt_get_tag(vha, sess, atio);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3062,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		ha->tgt.tgt_ops->put_sess(sess);
		return -EBUSY;
	}

	cmd->cmd_in_wq = 1;
	cmd->trc_flags |= TRC_NEW_CMD;

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	INIT_WORK(&cmd->work, qlt_do_work);
	if (vha->flags.qpairs_available) {
		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
	} else if (ha->msix_count) {
		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
			queue_work(qla_tgt_wq, &cmd->work);
		else
			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
			    &cmd->work);
	} else {
		queue_work(qla_tgt_wq, &cmd->work);
	}

	return 0;
}
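/*
 * Build a management command for a task-management request. For LUN-scoped
 * functions (LUN reset, clear/abort task set, clear ACA), in-flight
 * commands on the LUN are aborted first and the work is steered to that
 * LUN's qpair hint; the remaining functions keep the default hint.
 */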
/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their data could "
		    "leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;
	mcmd->se_cmd.cpuid = h->cpuid;
	mcmd->unpacked_lun = lun;

	switch (fn) {
	case QLA_TGT_LUN_RESET:
	case QLA_TGT_CLEAR_TS:
	case QLA_TGT_ABORT_TS:
		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
		fallthrough;
	case QLA_TGT_CLEAR_ACA:
		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
		mcmd->qpair = h->qpair;
		mcmd->se_cmd.cpuid = h->cpuid;
		break;

	case QLA_TGT_TARGET_RESET:
	case QLA_TGT_NEXUS_LOSS_SESS:
	case QLA_TGT_NEXUS_LOSS:
	case QLA_TGT_ABORT_ALL:
	default:
		/* no-op */
		break;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
	    &mcmd->work);

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	u64 unpacked_lun;
	int fn;
	unsigned long flags;

	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

	if (sess == NULL || sess->deleted)
		return -EFAULT;

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	u64 unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	unpacked_lun =
	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
	mcmd->qpair = ha->base_qpair;

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	int loop_id;
	unsigned long flags;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
	if (rc != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
		    "%s: se_sess %p / sess %p from"
		    " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
		    " LOGO failed: %#x\n",
		    __func__,
		    fcport->se_sess,
		    fcport,
		    fcport->port_name, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, rc);
	}

	fcport->logout_completed = 1;
}
/*
 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
 *
 * Schedules sessions with matching port_id/loop_id but different wwn for
 * deletion. Returns the existing session with matching wwn if present,
 * NULL otherwise.
 */
struct fc_port *
qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
	struct fc_port *sess = NULL, *other_sess;
	uint64_t other_wwn;

	*conflict_sess = NULL;

	list_for_each_entry(other_sess, &vha->vp_fcports, list) {

		other_wwn = wwn_to_u64(other_sess->port_name);

		if (wwn == other_wwn) {
			WARN_ON(sess);
			sess = other_sess;
			continue;
		}

		/* find other sess with nport_id collision */
		if (port_id.b24 == other_sess->d_id.b24) {
			if (loop_id != other_sess->loop_id) {
				ql_dbg(ql_dbg_disc, vha, 0x1000c,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				/*
				 * logout_on_delete is set by default, but
				 * another session that has the same
				 * s_id/loop_id combo might have cleared it
				 * when it requested this session's deletion,
				 * so don't touch it.
				 */
				qlt_schedule_sess_for_deletion(other_sess);
			} else {
				/*
				 * Another WWN used to have our s_id/loop_id.
				 * Kill the session, but don't free the loop_id.
				 */
				ql_dbg(ql_dbg_disc, vha, 0xf01b,
				    "Invalidating sess %p loop_id %d wwn %llx.\n",
				    other_sess, other_sess->loop_id, other_wwn);

				other_sess->keep_nport_handle = 1;
				if (other_sess->disc_state != DSC_DELETED)
					*conflict_sess = other_sess;
				qlt_schedule_sess_for_deletion(other_sess);
			}
			continue;
		}

		/* find other sess with nport handle collision */
		if ((loop_id == other_sess->loop_id) &&
		    (loop_id != FC_NO_LOOP_ID)) {
			ql_dbg(ql_dbg_disc, vha, 0x1000d,
			    "Invalidating sess %p loop_id %d wwn %llx.\n",
			    other_sess, other_sess->loop_id, other_wwn);

			/* Same loop_id but different s_id;
			 * OK to kill and logout */
			qlt_schedule_sess_for_deletion(other_sess);
		}
	}

	return sess;
}
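/*
 * The 24-bit S_ID is packed domain:area:al_pa into a u32 key, matching
 * sid_to_key(); e.g. S_ID 01:02:03 -> key 0x010203. Both the unknown-ATIO
 * list and the live command list are walked under cmd_list_lock and every
 * match is flagged aborted.
 */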
/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
{
	struct qla_tgt_sess_op *op;
	struct qla_tgt_cmd *cmd;
	uint32_t key;
	int count = 0;
	unsigned long flags;

	key = (((u32)s_id->b.domain << 16) |
	       ((u32)s_id->b.area << 8) |
	       ((u32)s_id->b.al_pa));

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);

		if (op_key == key) {
			op->aborted = true;
			count++;
		}
	}

	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);

		if (cmd_key == key) {
			cmd->aborted = 1;
			count++;
		}
	}
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	return count;
}
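/*
 * Handle an incoming PLOGI/PRLI immediate notify. Stale commands from the
 * same S_ID are marked aborted first; sessions with a colliding port_id or
 * nport handle are invalidated; edif gating may terminate the login early.
 * Returns 1 when the caller should ACK the IOCB itself, 0 when the ACK is
 * deferred.
 */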
static int qlt_handle_login(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id, wd3_lo;
	int res = 0;
	struct qlt_plogi_ack_t *pla;
	unsigned long flags;

	lockdep_assert_held(&vha->hw->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
	abort_cmds_for_s_id(vha, &port_id);

	if (wwn) {
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		sess = qlt_find_sess_invalidate_other(vha, wwn,
		    port_id, loop_id, &conflict_sess);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (IS_SW_RESV_ADDR(port_id)) {
		res = 1;
		goto out;
	}

	if (vha->hw->flags.edif_enabled &&
	    !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
	    iocb->u.isp24.status_subcode == ELS_PLOGI &&
	    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
		    __func__, __LINE__, loop_id, port_id.b24);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (vha->hw->flags.edif_enabled) {
		if (DBELL_INACTIVE(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		} else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
		    !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %d Term INOT due to insecure lid=%d, NportID %06X ",
			    __func__, __LINE__, loop_id, port_id.b24);
			qlt_send_term_imm_notif(vha, iocb, 1);
			goto out;
		}
	}

	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
	if (!pla) {
		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
		    "%s %d %8phC Term INOT due to mem alloc fail",
		    __func__, __LINE__,
		    iocb->u.isp24.port_name);
		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	if (conflict_sess) {
		conflict_sess->login_gen++;
		qlt_plogi_ack_link(vha, pla, conflict_sess,
		    QLT_PLOGI_LINK_CONFLICT);
	}

	if (!sess) {
		pla->ref_count++;
		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %d %8phC post new sess\n",
		    __func__, __LINE__, iocb->u.isp24.port_name);
		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name,
			    iocb->u.isp24.u.plogi.node_name,
			    pla, 0);
		else
			qla24xx_post_newsess_work(vha, &port_id,
			    iocb->u.isp24.port_name, NULL,
			    pla, 0);

		goto out;
	}

	if (sess->disc_state == DSC_UPD_FCPORT) {
		u16 sec;

		/*
		 * Remote port registration is still going on from
		 * previous login. Allow it to finish before we
		 * accept the new login.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration) / 1000;
		if (sess->sec_since_registration < sec && sec &&
		    !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, vha, 0xffff,
			    "%s %8phC - Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}

		if (!conflict_sess) {
			list_del(&pla->list);
			kmem_cache_free(qla_tgt_plogi_cachep, pla);
		}

		qlt_send_term_imm_notif(vha, iocb, 1);
		goto out;
	}

	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
	sess->d_id = port_id;
	sess->login_gen++;
	sess->loop_id = loop_id;

	if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
		/* remote port has assigned Port ID */
		if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
			vha->d_id = sess->d_id;

		ql_dbg(ql_dbg_disc, vha, 0xffff,
		    "%s %8phC - send port online\n",
		    __func__, sess->port_name);

		qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
		    sess->d_id.b24);
	}

	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
		sess->fw_login_state = DSC_LS_PRLI_PEND;
		sess->local = 0;

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
		if (wd3_lo & BIT_7)
			sess->conf_compl_supported = 1;

		if ((wd3_lo & BIT_4) == 0)
			sess->port_type = FCT_INITIATOR;
		else
			sess->port_type = FCT_TARGET;
	} else
		sess->fw_login_state = DSC_LS_PLOGI_PEND;

	ql_dbg(ql_dbg_disc, vha, 0x20f9,
	    "%s %d %8phC DS %d\n",
	    __func__, __LINE__, sess->port_name, sess->disc_state);

	switch (sess->disc_state) {
	case DSC_DELETED:
	case DSC_LOGIN_PEND:
		qlt_plogi_ack_unref(vha, pla);
		break;

	default:
		/*
		 * Under normal circumstances we want to release nport handle
		 * during LOGO process to avoid nport handle leaks inside FW.
		 * The exception is when LOGO is done while another PLOGI with
		 * the same nport handle is waiting as might be the case here.
		 * Note: there is always a possibility of a race where session
		 * deletion has already started for other reasons (e.g. ACL
		 * removal) and now PLOGI arrives:
		 * 1. if PLOGI arrived in FW after nport handle has been freed,
		 *    FW must have assigned this PLOGI a new/same handle and we
		 *    can proceed ACK'ing it as usual when session deletion
		 *    completes.
		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
		 *    bit reached it, the handle has now been released. We'll
		 *    get an error when we ACK this PLOGI. Nothing will be sent
		 *    back to the initiator. The initiator should eventually
		 *    retry PLOGI and the situation will correct itself.
		 */
		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
		    (sess->d_id.b24 == port_id.b24));

		ql_dbg(ql_dbg_disc, vha, 0x20f9,
		    "%s %d %8phC post del sess\n",
		    __func__, __LINE__, sess->port_name);

		qlt_schedule_sess_for_deletion(sess);

		break;
	}
out:
	return res;
}
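/*
 * Top-level dispatcher for ELS immediate notifies (PLOGI, PRLI, LOGO,
 * PRLO, TPRLO, PDISC/ADISC). The res convention documented below is what
 * the caller relies on to decide whether to send the notify ack itself.
 */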
/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL, *conflict_sess = NULL;
	uint64_t wwn;
	port_id_t port_id;
	uint16_t loop_id;
	uint16_t wd3_lo;
	int res = 0;
	unsigned long flags;

	lockdep_assert_held(&ha->hardware_lock);

	wwn = wwn_to_u64(iocb->u.isp24.port_name);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
	    vha->vp_idx, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    iocb->u.isp24.status_subcode, loop_id,
	    iocb->u.isp24.port_name);

	/*
	 * res = 1 means ack at the end of thread
	 * res = 0 means ack async/later.
	 */
	switch (iocb->u.isp24.status_subcode) {
	case ELS_PLOGI:
		res = qlt_handle_login(vha, iocb);
		break;

	case ELS_PRLI:
		if (N2N_TOPO(ha)) {
			sess = qla2x00_find_fcport_by_wwpn(vha,
			    iocb->u.isp24.port_name, 1);

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
				    __func__, __LINE__,
				    iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			res = qlt_handle_login(vha, iocb);
			break;
		}

		if (IS_SW_RESV_ADDR(port_id)) {
			res = 1;
			break;
		}

		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

		if (wwn) {
			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
			    loop_id, &conflict_sess);
			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}

		if (conflict_sess) {
			switch (conflict_sess->disc_state) {
			case DSC_DELETED:
			case DSC_DELETE_PEND:
				break;
			default:
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
				    "PRLI with conflicting sess %p port %8phC\n",
				    conflict_sess, conflict_sess->port_name);
				conflict_sess->fw_login_state =
				    DSC_LS_PORT_UNAVAIL;
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}
		}

		if (sess != NULL) {
			bool delete = false;
			int sec;

			if (vha->hw->flags.edif_enabled && sess &&
			    (!(sess->flags & FCF_FCSP_DEVICE) ||
			     !sess->edif.authok)) {
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
				    __func__, __LINE__, iocb->u.isp24.port_name);
				qlt_send_term_imm_notif(vha, iocb, 1);
				break;
			}

			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
			switch (sess->fw_login_state) {
			case DSC_LS_PLOGI_PEND:
			case DSC_LS_PLOGI_COMP:
			case DSC_LS_PRLI_COMP:
				break;
			default:
				delete = true;
				break;
			}

			switch (sess->disc_state) {
			case DSC_UPD_FCPORT:
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);

				sec = jiffies_to_msecs(jiffies -
				    sess->jiffies_at_registration) / 1000;
				if (sess->sec_since_registration < sec && sec &&
				    !(sec % 5)) {
					sess->sec_since_registration = sec;
					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
					    "%s %8phC : Slow Rport registration (%d Sec)\n",
					    __func__, sess->port_name, sec);
				}
				qlt_send_term_imm_notif(vha, iocb, 1);
				return 0;

			case DSC_LOGIN_PEND:
			case DSC_GPDB:
			case DSC_LOGIN_COMPLETE:
			case DSC_ADISC:
				delete = false;
				break;
			default:
				break;
			}

			if (delete) {
				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
				    flags);
				/*
				 * An impatient initiator sent PRLI before the
				 * last PLOGI could finish. Force it to retry
				 * while the last one finishes.
				 */
				ql_log(ql_log_warn, sess->vha, 0xf095,
				    "sess %p PRLI received, before plogi ack.\n",
				    sess);
				qlt_send_term_imm_notif(vha, iocb, 1);
				res = 0;
				break;
			}

			/*
			 * This shouldn't happen under normal circumstances,
			 * since we have deleted the old session during PLOGI.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
			    sess->loop_id, sess, iocb->u.isp24.nport_handle);

			sess->local = 0;
			sess->loop_id = loop_id;
			sess->d_id = port_id;
			sess->fw_login_state = DSC_LS_PRLI_PEND;

			if (wd3_lo & BIT_7)
				sess->conf_compl_supported = 1;

			if ((wd3_lo & BIT_4) == 0)
				sess->port_type = FCT_INITIATOR;
			else
				sess->port_type = FCT_TARGET;

			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
		}
		res = 1; /* send notify ack */

		/* Make session global (not used in fabric mode) */
		if (ha->current_topology != ISP_CFG_F) {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fa,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			} else {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else {
			if (sess) {
				ql_dbg(ql_dbg_disc, vha, 0x20fb,
				    "%s %d %8phC post nack\n",
				    __func__, __LINE__, sess->port_name);
				qla24xx_post_nack_work(vha, sess, iocb,
				    SRB_NACK_PRLI);
				res = 0;
			}
		}
		break;

	case ELS_TPRLO:
		if (le16_to_cpu(iocb->u.isp24.flags) &
		    NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
			loop_id = 0xFFFF;
			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
			res = 1;
			break;
		}
		fallthrough;
	case ELS_LOGO:
	case ELS_PRLO:
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		if (sess) {
			sess->login_gen++;
			sess->fw_login_state = DSC_LS_LOGO_PEND;
			sess->logo_ack_needed = 1;
			memcpy(sess->iocb, iocb, IOCB_SIZE);
		}

		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);

		ql_dbg(ql_dbg_disc, vha, 0x20fc,
		    "%s: logo %llx res %d sess %p ",
		    __func__, wwn, res, sess);
		if (res == 0) {
			/*
			 * cmd went to the upper layer; look at
			 * qlt_xmit_tm_rsp() for LOGO_ACK & sess delete.
			 */
			BUG_ON(!sess);
			res = 0;
		} else {
			/* cmd did not go to upper layer. */
			if (sess) {
				qlt_schedule_sess_for_deletion(sess);
				res = 0;
			}
			/* else the LOGO will be ACKed */
		}
		break;

	case ELS_PDISC:
	case ELS_ADISC:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}

		sess = qla2x00_find_fcport_by_wwpn(vha,
		    iocb->u.isp24.port_name, 1);
		if (sess) {
			ql_dbg(ql_dbg_disc, vha, 0x20fd,
			    "sess %p lid %d|%d DS %d LS %d\n",
			    sess, sess->loop_id, loop_id,
			    sess->disc_state, sess->fw_login_state);
		}

		res = 1; /* send notify ack */
		break;
	}

	case ELS_FLOGI: /* should never happen */
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
		    "qla_target(%d): Unsupported ELS command %x "
		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
		break;
	}

	ql_dbg(ql_dbg_disc, vha, 0xf026,
	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
	    vha->vp_idx, iocb->u.isp24.status_subcode, res);

	return res;
}
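/*
 * Decode an immediate-notify IOCB by status and either reset/abort target
 * state or forward to the ELS handler. A notify ack is sent at the end
 * unless one of the handlers took ownership of the ack (send_notify_ack
 * cleared below).
 */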
/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	lockdep_assert_held(&ha->hardware_lock);

	status = le16_to_cpu(iocb->u.isp2x.status);

	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires waiting after LINK REINIT for possible
		 * PDISC or ADISC ELS commands.
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
		    0, 0);
}
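/*
 * The busy reply below is a status-only (MODE_1) CTIO that reuses the
 * original exchange and carries the SCSI status in u.status1; no data
 * phase occurs, so the full transfer length is reported as an underrun
 * residual.
 */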
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire. This function sends busy to ISP 2xxx or 24xx.
 */
static int __qlt_send_busy(struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint16_t status)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct fc_port *sess = NULL;
	unsigned long flags;
	u16 temp;
	port_id_t id;

	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	if (!sess) {
		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
		return 0;
	}

	/* Sending marker isn't necessary, since we're called from an ISR */
	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_io, vha, 0x3063,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	qpair->tgt_counters.num_q_full_sent++;
	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cpu_to_le16(sess->loop_id);
	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
	    CTIO7_FLAGS_DONT_RET_CTIO;
	ctio24->u.status1.flags = cpu_to_le16(temp);

	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id =
	    cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
	ctio24->u.status1.scsi_status = cpu_to_le16(status);

	ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
	return 0;
}
/*
 * This routine is used to allocate a command for either a QFull condition
 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	struct qla_tgt_cmd *cmd;
	unsigned long flags;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
		    "qla_target(%d): %s: QFull CMD dropped[%d]\n",
		    vha->vp_idx, __func__,
		    vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	cmd = ha->tgt.tgt_ops->get_cmd(sess);
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
		    "qla_target(%d): %s: Allocation of cmd failed\n",
		    vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->qla_stats.stat_max_qfull_cmds_dropped)
			vha->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = ha->base_qpair->chip_reset;
	cmd->qpair = ha->base_qpair;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->qla_stats.stat_max_qfull_cmds_alloc)
		vha->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
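/*
 * Drain the q_full_list on the given qpair: send the deferred busy status
 * or term-exchange response for each queued command, then free it. Commands
 * that could not be sent (-ENOMEM) stay queued and are spliced back onto
 * the list at the end.
 */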
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list, q_full_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return 0;

	INIT_LIST_HEAD(&free_list);
	INIT_LIST_HEAD(&q_full_list);

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
		return 0;
	}

	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_move_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/*
		 * This cmd was never sent to TCM. There is no need
		 * to schedule free or call free_cmd.
		 */
		qlt_free_cmd(cmd);
	}

	if (!list_empty(&q_full_list)) {
		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
	}

	return rc;
}
static void
qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
    uint16_t status)
{
	int rc = 0;
	struct scsi_qla_host *vha = qpair->vha;

	rc = __qlt_send_busy(qpair, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}

static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	if (!ha_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_busy(qpair, atio, qla_sam_status);
	if (!ha_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 1;
}
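/*
 * Main ATIO ring entry point. ATIO_TYPE7 entries are either FCP commands
 * (queue-full checked, then handed to qlt_handle_cmd_for_atio()) or task
 * management requests; IMMED_NOTIFY entries are decoded separately. Error
 * returns are mapped to TERM EXCHANGE or BUSY responses below.
 */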
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;
	unsigned long flags = 0;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->atio_irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
			     cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
			    atio, ha_locked);
			if (rc != 0) {
				tgt->atio_irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (!ha_locked)
				spin_lock_irqsave(&ha->hardware_lock, flags);
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(ha->base_qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(ha->base_qpair, atio,
				    qla_sam_status);
				break;
			}
			if (!ha_locked)
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

		if (!ha_locked)
			spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->atio_irq_cmd_count--;
}
/*
 * qpair lock is assumed to be held
 * rc = 0 : send terminate & abts respond
 * rc != 0: do not send term & abts respond
 */
static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
    struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
{
	struct qla_hw_data *ha = vha->hw;
	int rc = 0;

	/*
	 * Detect an unresolved exchange. If the same ABTS is unable to
	 * terminate an existing command and keeps looping between FW &
	 * driver, force a FW dump. Within one jiffy we should see
	 * multiple loops.
	 */
	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
	    qpair->retry_term_jiff == jiffies) {
		/* found existing exchange */
		qpair->retry_term_cnt++;
		if (qpair->retry_term_cnt >= 5) {
			rc = -EIO;
			qpair->retry_term_cnt = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "Unable to send ABTS Response. Dumping firmware.\n");
			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));

			if (qpair == ha->base_qpair)
				ha->isp_ops->fw_dump(vha);
			else
				qla2xxx_dump_fw(vha);

			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
	} else if (qpair->retry_term_jiff != jiffies) {
		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
		qpair->retry_term_cnt = 0;
		qpair->retry_term_jiff = jiffies;
	}

	return rc;
}
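/*
 * Completion path for an ABTS response we issued. On failure with error
 * subcode1 0x1E and subcode2 0 (read here as the firmware being unable to
 * terminate the exchange; exact meaning is firmware-defined), the term
 * exchange is retried unless the loop detector above decides to dump
 * firmware instead.
 */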
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct abts_resp_from_24xx_fw *entry =
		(struct abts_resp_from_24xx_fw *)pkt;
	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_hw_data *ha = vha->hw;

	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
		ql_dbg(ql_dbg_async, vha, 0xe064,
		    "qla_target(%d): ABTS Comp without mcmd\n",
		    vha->vp_idx);
		return;
	}

	if (mcmd)
		vha = mcmd->vha;
	vha->vha_tgt.qla_tgt->abts_resp_expected--;

	ql_dbg(ql_dbg_tgt, vha, 0xe038,
	    "ABTS_RESP_24XX: compl_status %x\n",
	    entry->compl_status);

	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
		if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
		    le32_to_cpu(entry->error_subcode2) == 0) {
			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
				ha->tgt.tgt_ops->free_mcmd(mcmd);
				return;
			}
			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
			    pkt, mcmd);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe063,
			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
			    vha->vp_idx, entry->compl_status,
			    entry->error_subcode1,
			    entry->error_subcode2);
			ha->tgt.tgt_ops->free_mcmd(mcmd);
		}
	} else if (mcmd) {
		ha->tgt.tgt_ops->free_mcmd(mcmd);
	}
}
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
		    vha->vp_idx, pkt->entry_type, vha->hw);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;

		if (atio->u.isp2x.status !=
		    cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
		if (rc != 0)
			return;

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			switch (rc) {
			case -ENODEV:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target\n");
				break;
			case -EBADF:
				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
				qlt_send_term_exchange(rsp->qpair, NULL,
				    atio, 1, 0);
				break;
			case -EBUSY:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    tc_sam_status);
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe060,
				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
				    vha->vp_idx);
				qlt_send_busy(rsp->qpair, atio,
				    qla_sam_status);
				break;
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

		qlt_do_ctio_completion(vha, rsp, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			qlt_handle_abts_completion(vha, rsp, pkt);
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}
}
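/*
 * Mailbox async-event hook for target mode. Most events are only logged;
 * LOOP_UP flushes a pending LINK REINIT ack, and repeated exchange
 * starvation (MBA_REJECTED_FCP_CMD with m[3] == 1) escalates to a RISC
 * reset after five occurrences.
 */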
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
		return;

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(ha->base_qpair,
			    &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
		break;

	case MBA_REJECTED_FCP_CMD:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
		    vha->vp_idx,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);

		if (mailbox[3] == 1) {
			/* exchange starvation. */
			vha->hw->exch_starvation++;
			if (vha->hw->exch_starvation > 5) {
				ql_log(ql_log_warn, vha, 0xd03a,
				    "Exchange starvation. Resetting RISC\n");

				vha->hw->exch_starvation = 0;
				if (IS_P3P_TYPE(vha->hw))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);

		login_code = mailbox[2];
		if (login_code == 0x4) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
			vha->hw->exch_starvation = 0;
		} else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		break;
	}
}
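/*
 * Look up (or create) an fc_port for a loop_id by asking the firmware for
 * its port database entry. If a session with the same WWPN already exists
 * it is refreshed and the temporary fcport is freed; otherwise the new
 * fcport is added to vp_fcports and marked login-complete.
 */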
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport, *tfcp, *del;
	int rc;
	unsigned long flags;
	u8 newfcport = 0;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla24xx_gpdb_wait(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	del = NULL;
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);

	if (tfcp) {
		tfcp->d_id = fcport->d_id;
		tfcp->port_type = fcport->port_type;
		tfcp->supported_classes = fcport->supported_classes;
		tfcp->flags |= fcport->flags;
		tfcp->scan_state = QLA_FCPORT_FOUND;

		del = fcport;
		fcport = tfcp;
	} else {
		if (vha->hw->current_topology == ISP_CFG_F)
			fcport->flags |= FCF_FABRIC_DEVICE;

		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
		fcport->login_gen++;
		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
		fcport->login_succ = 1;
		newfcport = 1;
	}

	fcport->deleted = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
	case MODE_DUAL:
		if (newfcport) {
			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
				qla24xx_sched_upd_fcport(fcport);
			} else {
				ql_dbg(ql_dbg_disc, vha, 0x20ff,
				    "%s %d %8phC post gpsc fcp_cnt %d\n",
				    __func__, __LINE__, fcport->port_name, vha->fcport_count);
				qla24xx_post_gpsc_work(vha, fcport);
			}
		}
		break;

	case MODE_TARGET:
	default:
		break;
	}

	if (del)
		qla2x00_free_fcport(del);

	return fcport;
}

/* Acquires and releases tgt_mutex internally */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
	be_id_t s_id)
{
	struct fc_port *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
		/*
		 * This is Domain Controller, so it should be
		 * OK to drop SCSI commands from it.
		 */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
		    "Unable to find initiator with S_ID %x:%x:%x",
		    s_id.domain, s_id.area, s_id.al_pa);
		return NULL;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		ql_log(ql_log_info, vha, 0xf071,
		    "qla_target(%d): Unable to find "
		    "initiator with S_ID %x:%x:%x",
		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);

		if (rc == -ENOENT) {
			qlt_port_logo_t logo;

			logo.id = be_to_port_id(s_id);
			logo.cmd_count = 1;
			qlt_send_first_logo(vha, &logo);
		}

		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport) {
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		return NULL;
	}

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	return sess;
}
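
/*
 * Deferred-work handler for an ABTS that arrived before a session was
 * known. Re-resolves the initiator by S_ID (creating a local session if
 * needed), replays the ABTS through __qlt_24xx_handle_abts(), and falls
 * back to sending an FCP_TMF_REJECTED ABTS response on any failure.
 */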
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	unsigned long flags = 0, flags2 = 0;
	be_id_t s_id;
	int rc;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

	if (tgt->tgt_stop)
		goto out_term2;

	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */

		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
		if (!sess)
			goto out_term2;
	} else {
		if (sess->deleted) {
			sess = NULL;
			goto out_term2;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
			    "%s: kref_get fail %8phC\n",
			    __func__, sess->port_name);
			sess = NULL;
			goto out_term2;
		}
	}

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

	ha->tgt.tgt_ops->put_sess(sess);

	if (rc != 0)
		goto out_term;
	return;

out_term2:
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
	    FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
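
/*
 * Session work queue handler: drains tgt->sess_works_list and dispatches
 * each queued item. QLA_TGT_SESS_WORK_ABORT is currently the only work
 * type; anything else indicates corruption, hence the BUG_ON() below.
 */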
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing.
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
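
/*
 * Allocate and register the struct qla_tgt for a physical host: set up
 * one qla_qpair_hint per qpair (slot 0 is the base qpair), initialize
 * the LUN-to-qpair btree and the session work machinery, size
 * sg_tablesize from the request ring, and add the target to the global
 * qla_tgt_glist.
 */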
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	tgt->qphints = kcalloc(ha->max_qpairs + 1,
	    sizeof(struct qla_qpair_hint), GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
		    "Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}

	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;
		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

void qla_remove_hostmap(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->host_map, key, node)
		btree_remove32(&ha->host_map, key);

	btree_destroy32(&ha->host_map);
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
	put_unaligned_be64(wwpn, b);
	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}
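
/*
 * Usage sketch for qlt_lport_register() below (hypothetical caller and
 * callback names): a fabric module such as tcm_qla2xxx matches itself to
 * a vha by physical WWPN, roughly:
 *
 *	rc = qlt_lport_register(lport, wwpn, 0, 0, my_lport_callback);
 *
 * npiv_wwpn/npiv_wwnn are passed straight through to @callback; zero
 * values indicate a physical (non-NPIV) lport registration here.
 */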

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN
 * @npiv_wwpn: NPIV WWPN
 * @npiv_wwnn: NPIV WWNN
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
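
/*
 * Map the configured qlini_mode onto the SCSI host's active_mode:
 * "disabled"/"exclusive" run pure target, "enabled" runs pure initiator,
 * and "dual" runs both. qlt_clear_mode() below undoes this when target
 * mode is switched off, falling back to initiator (or unknown when the
 * initiator was disabled outright).
 */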
/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
		ha->tgt.num_act_qpairs = ha->max_qpairs;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
			     QLA_SUCCESS);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	/*
	 * We are expecting the offline state.
	 * QLA_FUNCTION_FAILED means that adapter is offline.
	 */
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		ql_dbg(ql_dbg_tgt, vha, 0xe081,
		    "adapter is offline\n");
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

u8
qlt_rff_id(struct scsi_qla_host *vha)
{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha)) {
		fc4_feature = BIT_0 | BIT_1;
	}

	return fc4_feature;
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: nonzero if the caller already holds the hardware lock
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * cannot be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    &pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    pkt->u.isp24.exchange_addr, pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked, 0);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
	rd_reg_dword(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = cpu_to_le16(msix->entry);
			icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for ATIO queue.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}
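
/*
 * The nvram_stage1 helpers below tweak firmware options for target or
 * dual mode: the original NVRAM values are saved once so they can be
 * restored verbatim when target mode is turned back off, the exchange
 * count is raised (0xFFFF for pure target, ql2xexchoffld for dual), and
 * individual firmware option bits are set or cleared as annotated inline.
 */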
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);

		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.  bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	mutex_init(&base_vha->vha_tgt.tgt_mutex);

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	qlt_update_vp_map(base_vha, SET_VP_IDX);
}
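
/*
 * MSI-X handler for the dedicated ATIO queue vector on adapters that
 * support one (see qlt_24xx_config_rings() above). The ATIO queue belongs
 * to the base vha, so the handler resolves it via pci_get_drvdata() and
 * drains the queue under tgt.atio_lock without the hardware lock held
 * (ha_locked == 0).
 */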
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}
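
/*
 * Deferred handler for an ABTS received on a response queue: the ATIO
 * queue is drained first so any command the ABTS refers to has been seen,
 * then the saved response packet is replayed to the right vha. The work
 * item is dropped if a chip reset happened after it was queued.
 */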
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
	    struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}

void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
    response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);
	if (!op) {
		/* do not reach for ATIO queue here.  This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;

	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}
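
/*
 * Allocate the per-HBA target-mode resources: the vp_idx-to-vha map used
 * by qlt_update_vp_map() and the DMA-coherent ATIO ring (one entry beyond
 * atio_q_length, matching the size freed in qlt_mem_free()).
 */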
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
	    sizeof(struct qla_tgt_vp_map), GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
	kfree(ha->tgt.tgt_vp_map);
	ha->tgt.tgt_vp_map = NULL;
}
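
/*
 * qlt_update_vp_map() keeps two lookup structures in sync:
 *   SET_VP_IDX / RESET_VP_IDX - the vp_idx-indexed tgt_vp_map array
 *   SET_AL_PA  / RESET_AL_PA  - the host_map btree keyed by S_ID (d_id.b24)
 * The AL_PA cases handle both first insertion and replacement of an
 * existing entry for the same 24-bit port id.
 */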
/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	key = vha->d_id.b24;

	switch (cmd) {
	case SET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (!slot) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->host_map,
			    key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->host_map, key, vha);
		break;
	case RESET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		    "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (slot)
			btree_remove32(&vha->hw->host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}

void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	}
}
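
/*
 * Translate the qlini_mode module parameter string into the ql2x_ini_mode
 * enum at module init; returns true on a recognized value. For example,
 * loading with qlini_mode="disabled" selects QLA2XXX_INI_MODE_DISABLED,
 * which also makes qlt_init() return 1 to signal that initiator mode is
 * being disabled.
 */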
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

int __init qlt_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);
	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}
  6353. }