mpt3sas_scsih.c 370 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
2622126231262412625126261262712628126291263012631126321263312634126351263612637126381263912640126411264212643126441264512646126471264812649126501265112652126531265412655126561265712658126591266012661126621266312664126651266612667126681266912670126711267212673126741267512676126771267812679126801268112682126831268412685126861268712688126891269012691126921269312694126951269612697126981269912700127011270212703127041270512706127071270812709127101271112712127131271412715127161271712718127191272012721127221272312724127251272612727127281272912730127311273212733127341273512736127371273812739127401274112742127431274412745127461274712748127491275012751127521275312754127551275612757127581275912760127611276212763127641276512766127671276812769127701277112772127731277412775127761277712778127791278012781127821278312784127851278612787127881278912790127911279212793127941279512796127971279812799128001280112802128031280412805128061280712808128091281012811128121281312814128151281612817128181281912820128211282212823128241282512826128271282812829128301283112832128331283412835128361283712838128391284012841128421284312844128451284612847128481284912850128511285212853128541285512856128571285812859128601286112862128631286412865128661286712868128691287012871128721287312874128751287612877128781287912880128811288212883128841288512886128871288812889128901289112892128931289412895128961289712898128991290012901129021290312904129051290612907129081290912910129111291212913129141291512916129171291812919129201292112922129231292412925129261292712928129291293012931129321293312934129351293612937129381293912940129411294212943
  1. /*
  2. * Scsi Host Layer for MPT (Message Passing Technology) based controllers
  3. *
  4. * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
  5. * Copyright (C) 2012-2014 LSI Corporation
  6. * Copyright (C) 2013-2014 Avago Technologies
  7. * (mailto: [email protected])
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version 2
  12. * of the License, or (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * NO WARRANTY
  20. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  21. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  22. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  23. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  24. * solely responsible for determining the appropriateness of using and
  25. * distributing the Program and assumes all risks associated with its
  26. * exercise of rights under this Agreement, including but not limited to
  27. * the risks and costs of program errors, damage to or loss of data,
  28. * programs or equipment, and unavailability or interruption of operations.
  29. * DISCLAIMER OF LIABILITY
  30. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  31. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  32. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  33. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  34. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  35. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  36. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  37. * You should have received a copy of the GNU General Public License
  38. * along with this program; if not, write to the Free Software
  39. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
  40. * USA.
  41. */
  42. #include <linux/module.h>
  43. #include <linux/kernel.h>
  44. #include <linux/init.h>
  45. #include <linux/errno.h>
  46. #include <linux/blkdev.h>
  47. #include <linux/sched.h>
  48. #include <linux/workqueue.h>
  49. #include <linux/delay.h>
  50. #include <linux/pci.h>
  51. #include <linux/interrupt.h>
  52. #include <linux/aer.h>
  53. #include <linux/raid_class.h>
  54. #include <linux/blk-mq-pci.h>
  55. #include <asm/unaligned.h>
  56. #include "mpt3sas_base.h"
/*
 * Virtual channel numbers on the Scsi_Host (channel 0 is the default
 * direct-attached channel).
 * NOTE(review): presumably RAID volumes are exposed on channel 1 and
 * PCIe/NVMe devices on channel 2 -- confirm against the device-add
 * paths later in this file.
 */
#define RAID_CHANNEL 1
#define PCIE_CHANNEL 2
/* forward proto's -- definitions appear later in this file */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);
static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);
MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");
/*
 * local parameters: callback indexes returned when each handler is
 * registered with the base driver.  Storing -1 in a u8 wraps to 0xFF,
 * which acts as the "not registered" sentinel.
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* running count of adapters claimed per generation (host numbering) */
static int mpt2_ids;
static int mpt3_ids;
static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;
/* command line options */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");
static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
	" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
	1 - enumerates only SAS 2.0 generation HBAs\n \
	2 - enumerates only SAS 3.0 generation HBAs (default=0)");
/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
static bool enable_sdev_max_qd;
module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
	"Enable sdev max qd as can_queue, def=disabled(0)");
static int multipath_on_hba = -1;
module_param(multipath_on_hba, int, 0);
MODULE_PARM_DESC(multipath_on_hba,
	"Multipath support to add same target device\n\t\t"
	"as many times as it is visible to HBA from various paths\n\t\t"
	"(by default:\n\t\t"
	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
	"\t SAS 3.5 HBA - This will be enabled)");
static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable,
	"Shared host tagset enable/disable Default: enable(1)");
/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};
/*
 * Driver-private pseudo event codes placed at the very top of the u16
 * range so they cannot collide with firmware MPI2_EVENT_* values.
 * NOTE(review): these appear to be queued and dispatched through the
 * same fw_event_work machinery as real firmware events -- confirm
 * against _firmware_event_work().
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head list;
	struct work_struct work;
	struct MPT3SAS_ADAPTER *ioc;
	u16 device_handle;
	u8 VF_ID;
	u8 VP_ID;
	u8 ignore;
	u16 event;
	struct kref refcount;
	/* flexible array member; payload size chosen in alloc_fw_event_work() */
	char event_data[] __aligned(4);
};
  198. static void fw_event_work_free(struct kref *r)
  199. {
  200. kfree(container_of(r, struct fw_event_work, refcount));
  201. }
/* take an additional reference on a firmware event work item */
static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}
/*
 * drop a reference on a firmware event work item; the item is freed
 * via fw_event_work_free() when the count reaches zero
 */
static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}
  210. static struct fw_event_work *alloc_fw_event_work(int len)
  211. {
  212. struct fw_event_work *fw_event;
  213. fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
  214. if (!fw_event)
  215. return NULL;
  216. kref_init(&fw_event->refcount);
  217. return fw_event;
  218. }
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};
  266. /**
  267. * _scsih_set_debug_level - global setting of ioc->logging_level.
  268. * @val: ?
  269. * @kp: ?
  270. *
  271. * Note: The logging levels are defined in mpt3sas_debug.h.
  272. */
  273. static int
  274. _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
  275. {
  276. int ret = param_set_int(val, kp);
  277. struct MPT3SAS_ADAPTER *ioc;
  278. if (ret)
  279. return ret;
  280. pr_info("setting logging_level(0x%08x)\n", logging_level);
  281. spin_lock(&gioc_lock);
  282. list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
  283. ioc->logging_level = logging_level;
  284. spin_unlock(&gioc_lock);
  285. return 0;
  286. }
  287. module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
  288. &logging_level, 0644);
  289. /**
  290. * _scsih_srch_boot_sas_address - search based on sas_address
  291. * @sas_address: sas address
  292. * @boot_device: boot device object from bios page 2
  293. *
  294. * Return: 1 when there's a match, 0 means no match.
  295. */
  296. static inline int
  297. _scsih_srch_boot_sas_address(u64 sas_address,
  298. Mpi2BootDeviceSasWwid_t *boot_device)
  299. {
  300. return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
  301. }
  302. /**
  303. * _scsih_srch_boot_device_name - search based on device name
  304. * @device_name: device name specified in INDENTIFY fram
  305. * @boot_device: boot device object from bios page 2
  306. *
  307. * Return: 1 when there's a match, 0 means no match.
  308. */
  309. static inline int
  310. _scsih_srch_boot_device_name(u64 device_name,
  311. Mpi2BootDeviceDeviceName_t *boot_device)
  312. {
  313. return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
  314. }
  315. /**
  316. * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
  317. * @enclosure_logical_id: enclosure logical id
  318. * @slot_number: slot number
  319. * @boot_device: boot device object from bios page 2
  320. *
  321. * Return: 1 when there's a match, 0 means no match.
  322. */
  323. static inline int
  324. _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
  325. Mpi2BootDeviceEnclosureSlot_t *boot_device)
  326. {
  327. return (enclosure_logical_id == le64_to_cpu(boot_device->
  328. EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
  329. SlotNumber)) ? 1 : 0;
  330. }
  331. /**
  332. * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
  333. * port number from port list
  334. * @ioc: per adapter object
  335. * @port_id: port number
  336. * @bypass_dirty_port_flag: when set look the matching hba port entry even
  337. * if hba port entry is marked as dirty.
  338. *
  339. * Search for hba port entry corresponding to provided port number,
  340. * if available return port object otherwise return NULL.
  341. */
  342. struct hba_port *
  343. mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
  344. u8 port_id, u8 bypass_dirty_port_flag)
  345. {
  346. struct hba_port *port, *port_next;
  347. /*
  348. * When multipath_on_hba is disabled then
  349. * search the hba_port entry using default
  350. * port id i.e. 255
  351. */
  352. if (!ioc->multipath_on_hba)
  353. port_id = MULTIPATH_DISABLED_PORT_ID;
  354. list_for_each_entry_safe(port, port_next,
  355. &ioc->port_table_list, list) {
  356. if (port->port_id != port_id)
  357. continue;
  358. if (bypass_dirty_port_flag)
  359. return port;
  360. if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
  361. continue;
  362. return port;
  363. }
  364. /*
  365. * Allocate hba_port object for default port id (i.e. 255)
  366. * when multipath_on_hba is disabled for the HBA.
  367. * And add this object to port_table_list.
  368. */
  369. if (!ioc->multipath_on_hba) {
  370. port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
  371. if (!port)
  372. return NULL;
  373. port->port_id = port_id;
  374. ioc_info(ioc,
  375. "hba_port entry: %p, port: %d is added to hba_port list\n",
  376. port, port->port_id);
  377. list_add_tail(&port->list,
  378. &ioc->port_table_list);
  379. return port;
  380. }
  381. return NULL;
  382. }
  383. /**
  384. * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
  385. * @ioc: per adapter object
  386. * @port: hba_port object
  387. * @phy: phy number
  388. *
  389. * Return virtual_phy object corresponding to phy number.
  390. */
  391. struct virtual_phy *
  392. mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
  393. struct hba_port *port, u32 phy)
  394. {
  395. struct virtual_phy *vphy, *vphy_next;
  396. if (!port->vphys_mask)
  397. return NULL;
  398. list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
  399. if (vphy->phy_mask & (1 << phy))
  400. return vphy;
  401. }
  402. return NULL;
  403. }
  404. /**
  405. * _scsih_is_boot_device - search for matching boot device.
  406. * @sas_address: sas address
  407. * @device_name: device name specified in INDENTIFY fram
  408. * @enclosure_logical_id: enclosure logical id
  409. * @slot: slot number
  410. * @form: specifies boot device form
  411. * @boot_device: boot device object from bios page 2
  412. *
  413. * Return: 1 when there's a match, 0 means no match.
  414. */
  415. static int
  416. _scsih_is_boot_device(u64 sas_address, u64 device_name,
  417. u64 enclosure_logical_id, u16 slot, u8 form,
  418. Mpi2BiosPage2BootDevice_t *boot_device)
  419. {
  420. int rc = 0;
  421. switch (form) {
  422. case MPI2_BIOSPAGE2_FORM_SAS_WWID:
  423. if (!sas_address)
  424. break;
  425. rc = _scsih_srch_boot_sas_address(
  426. sas_address, &boot_device->SasWwid);
  427. break;
  428. case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
  429. if (!enclosure_logical_id)
  430. break;
  431. rc = _scsih_srch_boot_encl_slot(
  432. enclosure_logical_id,
  433. slot, &boot_device->EnclosureSlot);
  434. break;
  435. case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
  436. if (!device_name)
  437. break;
  438. rc = _scsih_srch_boot_device_name(
  439. device_name, &boot_device->DeviceName);
  440. break;
  441. case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
  442. break;
  443. }
  444. return rc;
  445. }
/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address (output; left 0 on failure)
 *
 * Reads SAS Device Page 0 for @handle and extracts the SAS address.
 *
 * Return: 0 success, non-zero when failure
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		    (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be first reported device to
 * to scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current. This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* extract the match keys according to the device's channel;
	 * RAID/PCIe devices only carry a wwid, SAS devices carry the
	 * full set (address, name, enclosure id, slot).
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* each bios page 2 entry keeps the first device that matches */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}
	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}
	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
  578. static struct _sas_device *
  579. __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  580. struct MPT3SAS_TARGET *tgt_priv)
  581. {
  582. struct _sas_device *ret;
  583. assert_spin_locked(&ioc->sas_device_lock);
  584. ret = tgt_priv->sas_dev;
  585. if (ret)
  586. sas_device_get(ret);
  587. return ret;
  588. }
  589. static struct _sas_device *
  590. mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  591. struct MPT3SAS_TARGET *tgt_priv)
  592. {
  593. struct _sas_device *ret;
  594. unsigned long flags;
  595. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  596. ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
  597. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  598. return ret;
  599. }
  600. static struct _pcie_device *
  601. __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  602. struct MPT3SAS_TARGET *tgt_priv)
  603. {
  604. struct _pcie_device *ret;
  605. assert_spin_locked(&ioc->pcie_device_lock);
  606. ret = tgt_priv->pcie_dev;
  607. if (ret)
  608. pcie_device_get(ret);
  609. return ret;
  610. }
  611. /**
  612. * mpt3sas_get_pdev_from_target - pcie device search
  613. * @ioc: per adapter object
  614. * @tgt_priv: starget private object
  615. *
  616. * Context: This function will acquire ioc->pcie_device_lock and will release
  617. * before returning the pcie_device object.
  618. *
  619. * This searches for pcie_device from target, then return pcie_device object.
  620. */
  621. static struct _pcie_device *
  622. mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  623. struct MPT3SAS_TARGET *tgt_priv)
  624. {
  625. struct _pcie_device *ret;
  626. unsigned long flags;
  627. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  628. ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
  629. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  630. return ret;
  631. }
  632. /**
  633. * __mpt3sas_get_sdev_by_rphy - sas device search
  634. * @ioc: per adapter object
  635. * @rphy: sas_rphy pointer
  636. *
  637. * Context: This function will acquire ioc->sas_device_lock and will release
  638. * before returning the sas_device object.
  639. *
  640. * This searches for sas_device from rphy object
  641. * then return sas_device object.
  642. */
  643. struct _sas_device *
  644. __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
  645. struct sas_rphy *rphy)
  646. {
  647. struct _sas_device *sas_device;
  648. assert_spin_locked(&ioc->sas_device_lock);
  649. list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
  650. if (sas_device->rphy != rphy)
  651. continue;
  652. sas_device_get(sas_device);
  653. return sas_device;
  654. }
  655. sas_device = NULL;
  656. list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
  657. if (sas_device->rphy != rphy)
  658. continue;
  659. sas_device_get(sas_device);
  660. return sas_device;
  661. }
  662. return NULL;
  663. }
  664. /**
  665. * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
  666. * sas address from sas_device_list list
  667. * @ioc: per adapter object
  668. * @sas_address: device sas address
  669. * @port: port number
  670. *
  671. * Search for _sas_device object corresponding to provided sas address,
  672. * if available return _sas_device object address otherwise return NULL.
  673. */
  674. struct _sas_device *
  675. __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
  676. u64 sas_address, struct hba_port *port)
  677. {
  678. struct _sas_device *sas_device;
  679. if (!port)
  680. return NULL;
  681. assert_spin_locked(&ioc->sas_device_lock);
  682. list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
  683. if (sas_device->sas_address != sas_address)
  684. continue;
  685. if (sas_device->port != port)
  686. continue;
  687. sas_device_get(sas_device);
  688. return sas_device;
  689. }
  690. list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
  691. if (sas_device->sas_address != sas_address)
  692. continue;
  693. if (sas_device->port != port)
  694. continue;
  695. sas_device_get(sas_device);
  696. return sas_device;
  697. }
  698. return NULL;
  699. }
  700. /**
  701. * mpt3sas_get_sdev_by_addr - sas device search
  702. * @ioc: per adapter object
  703. * @sas_address: sas address
  704. * @port: hba port entry
  705. * Context: Calling function should acquire ioc->sas_device_lock
  706. *
  707. * This searches for sas_device based on sas_address & port number,
  708. * then return sas_device object.
  709. */
  710. struct _sas_device *
  711. mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
  712. u64 sas_address, struct hba_port *port)
  713. {
  714. struct _sas_device *sas_device;
  715. unsigned long flags;
  716. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  717. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  718. sas_address, port);
  719. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  720. return sas_device;
  721. }
  722. static struct _sas_device *
  723. __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  724. {
  725. struct _sas_device *sas_device;
  726. assert_spin_locked(&ioc->sas_device_lock);
  727. list_for_each_entry(sas_device, &ioc->sas_device_list, list)
  728. if (sas_device->handle == handle)
  729. goto found_device;
  730. list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
  731. if (sas_device->handle == handle)
  732. goto found_device;
  733. return NULL;
  734. found_device:
  735. sas_device_get(sas_device);
  736. return sas_device;
  737. }
  738. /**
  739. * mpt3sas_get_sdev_by_handle - sas device search
  740. * @ioc: per adapter object
  741. * @handle: sas device handle (assigned by firmware)
  742. * Context: Calling function should acquire ioc->sas_device_lock
  743. *
  744. * This searches for sas_device based on sas_address, then return sas_device
  745. * object.
  746. */
  747. struct _sas_device *
  748. mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  749. {
  750. struct _sas_device *sas_device;
  751. unsigned long flags;
  752. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  753. sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
  754. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  755. return sas_device;
  756. }
/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct (may be NULL)
 * @starget: scsi target struct (may be NULL)
 *
 * Prints enclosure logical id/slot, enclosure level/connector name and
 * chassis slot when each is valid.  The output sink is chosen by
 * priority: sdev_printk when @sdev is set, else starget_printk when
 * @starget is set, else ioc_info.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		/* drop the reference held by the list */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks up the sas_device by @handle, unlinks it from its list under the
 * lock, then tears it down outside the lock.
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* device lists are being rebuilt during host reset; do nothing */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the reference held by the list */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the lookup reference taken above */
		sas_device_put(sas_device);
	}
}
/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 * sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 *
 * Looks up the sas_device by address and port, unlinks it from its list
 * under the lock, then tears it down outside the lock.
 *
 * Return nothing.
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* device lists are being rebuilt during host reset; do nothing */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the reference held by the list */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the lookup reference taken above */
		sas_device_put(sas_device);
	}
}
/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list, then registering it with
 * the sas transport layer (unless drives are hidden behind a raid volume).
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* the list holds its own reference */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hidden raid component: not exposed to the transport layer */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		/* transport registration failed: back out the list insert */
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object at driver load time to the ioc->sas_device_init_list,
 * and checking it against the bios page 2 boot-device entries.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* the init list holds its own reference */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
  967. static struct _pcie_device *
  968. __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
  969. {
  970. struct _pcie_device *pcie_device;
  971. assert_spin_locked(&ioc->pcie_device_lock);
  972. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
  973. if (pcie_device->wwid == wwid)
  974. goto found_device;
  975. list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
  976. if (pcie_device->wwid == wwid)
  977. goto found_device;
  978. return NULL;
  979. found_device:
  980. pcie_device_get(pcie_device);
  981. return pcie_device;
  982. }
  983. /**
  984. * mpt3sas_get_pdev_by_wwid - pcie device search
  985. * @ioc: per adapter object
  986. * @wwid: wwid
  987. *
  988. * Context: This function will acquire ioc->pcie_device_lock and will release
  989. * before returning the pcie_device object.
  990. *
  991. * This searches for pcie_device based on wwid, then return pcie_device object.
  992. */
  993. static struct _pcie_device *
  994. mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
  995. {
  996. struct _pcie_device *pcie_device;
  997. unsigned long flags;
  998. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  999. pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
  1000. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1001. return pcie_device;
  1002. }
  1003. static struct _pcie_device *
  1004. __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
  1005. int channel)
  1006. {
  1007. struct _pcie_device *pcie_device;
  1008. assert_spin_locked(&ioc->pcie_device_lock);
  1009. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
  1010. if (pcie_device->id == id && pcie_device->channel == channel)
  1011. goto found_device;
  1012. list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
  1013. if (pcie_device->id == id && pcie_device->channel == channel)
  1014. goto found_device;
  1015. return NULL;
  1016. found_device:
  1017. pcie_device_get(pcie_device);
  1018. return pcie_device;
  1019. }
  1020. static struct _pcie_device *
  1021. __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1022. {
  1023. struct _pcie_device *pcie_device;
  1024. assert_spin_locked(&ioc->pcie_device_lock);
  1025. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
  1026. if (pcie_device->handle == handle)
  1027. goto found_device;
  1028. list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
  1029. if (pcie_device->handle == handle)
  1030. goto found_device;
  1031. return NULL;
  1032. found_device:
  1033. pcie_device_get(pcie_device);
  1034. return pcie_device;
  1035. }
  1036. /**
  1037. * mpt3sas_get_pdev_by_handle - pcie device search
  1038. * @ioc: per adapter object
  1039. * @handle: Firmware device handle
  1040. *
  1041. * Context: This function will acquire ioc->pcie_device_lock and will release
  1042. * before returning the pcie_device object.
  1043. *
  1044. * This searches for pcie_device based on handle, then return pcie_device
  1045. * object.
  1046. */
  1047. struct _pcie_device *
  1048. mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1049. {
  1050. struct _pcie_device *pcie_device;
  1051. unsigned long flags;
  1052. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1053. pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
  1054. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1055. return pcie_device;
  1056. }
  1057. /**
  1058. * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
  1059. * @ioc: per adapter object
  1060. * Context: This function will acquire ioc->pcie_device_lock
  1061. *
  1062. * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
  1063. * which has reported maximum among all available NVMe drives.
  1064. * Minimum max_shutdown_latency will be six seconds.
  1065. */
  1066. static void
  1067. _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
  1068. {
  1069. struct _pcie_device *pcie_device;
  1070. unsigned long flags;
  1071. u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
  1072. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1073. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
  1074. if (pcie_device->shutdown_latency) {
  1075. if (shutdown_latency < pcie_device->shutdown_latency)
  1076. shutdown_latency =
  1077. pcie_device->shutdown_latency;
  1078. }
  1079. }
  1080. ioc->max_shutdown_latency = shutdown_latency;
  1081. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1082. }
/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	/* note under the lock; recomputed outside it to avoid nesting */
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* drop the reference that was held by the list */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks up the device by firmware handle, unlinks it from
 * ioc->pcie_device_list and removes it from the SCSI mid-layer.
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* During host reset the topology is rebuilt anyway; skip removal. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* Drop the reference that the list held. */
			pcie_device_put(pcie_device);
		}
		/* Remember whether this device set the IOC-wide maximum. */
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		/* Balances the get in __mpt3sas_get_pdev_by_handle(). */
		pcie_device_put(pcie_device);
	}
	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list, then the device is
 * registered with the SCSI mid-layer unless firmware reports it blocked.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Take a reference on behalf of the list before linking. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* Blocked devices are tracked but never exposed to the mid-layer. */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* Mid-layer registration failed; undo the list insertion. */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* The init list holds its own reference on the device. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	/* Blocked devices must not be considered as boot candidates. */
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
  1250. /**
  1251. * _scsih_raid_device_find_by_id - raid device search
  1252. * @ioc: per adapter object
  1253. * @id: sas device target id
  1254. * @channel: sas device channel
  1255. * Context: Calling function should acquire ioc->raid_device_lock
  1256. *
  1257. * This searches for raid_device based on target id, then return raid_device
  1258. * object.
  1259. */
  1260. static struct _raid_device *
  1261. _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
  1262. {
  1263. struct _raid_device *raid_device, *r;
  1264. r = NULL;
  1265. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  1266. if (raid_device->id == id && raid_device->channel == channel) {
  1267. r = raid_device;
  1268. goto out;
  1269. }
  1270. }
  1271. out:
  1272. return r;
  1273. }
  1274. /**
  1275. * mpt3sas_raid_device_find_by_handle - raid device search
  1276. * @ioc: per adapter object
  1277. * @handle: sas device handle (assigned by firmware)
  1278. * Context: Calling function should acquire ioc->raid_device_lock
  1279. *
  1280. * This searches for raid_device based on handle, then return raid_device
  1281. * object.
  1282. */
  1283. struct _raid_device *
  1284. mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1285. {
  1286. struct _raid_device *raid_device, *r;
  1287. r = NULL;
  1288. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  1289. if (raid_device->handle != handle)
  1290. continue;
  1291. r = raid_device;
  1292. goto out;
  1293. }
  1294. out:
  1295. return r;
  1296. }
  1297. /**
  1298. * _scsih_raid_device_find_by_wwid - raid device search
  1299. * @ioc: per adapter object
  1300. * @wwid: ?
  1301. * Context: Calling function should acquire ioc->raid_device_lock
  1302. *
  1303. * This searches for raid_device based on wwid, then return raid_device
  1304. * object.
  1305. */
  1306. static struct _raid_device *
  1307. _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
  1308. {
  1309. struct _raid_device *raid_device, *r;
  1310. r = NULL;
  1311. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  1312. if (raid_device->wwid != wwid)
  1313. continue;
  1314. r = raid_device;
  1315. goto out;
  1316. }
  1317. out:
  1318. return r;
  1319. }
/**
 * _scsih_raid_device_add - add raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * This is added to the raid_device_list link list.
 */
static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    raid_device->handle, (u64)raid_device->wwid));

	/* raid_device_list is protected by raid_device_lock. */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
  1340. /**
  1341. * _scsih_raid_device_remove - delete raid_device object
  1342. * @ioc: per adapter object
  1343. * @raid_device: raid_device object
  1344. *
  1345. */
  1346. static void
  1347. _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
  1348. struct _raid_device *raid_device)
  1349. {
  1350. unsigned long flags;
  1351. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1352. list_del(&raid_device->list);
  1353. kfree(raid_device);
  1354. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1355. }
  1356. /**
  1357. * mpt3sas_scsih_expander_find_by_handle - expander device search
  1358. * @ioc: per adapter object
  1359. * @handle: expander handle (assigned by firmware)
  1360. * Context: Calling function should acquire ioc->sas_device_lock
  1361. *
  1362. * This searches for expander device based on handle, then returns the
  1363. * sas_node object.
  1364. */
  1365. struct _sas_node *
  1366. mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1367. {
  1368. struct _sas_node *sas_expander, *r;
  1369. r = NULL;
  1370. list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
  1371. if (sas_expander->handle != handle)
  1372. continue;
  1373. r = sas_expander;
  1374. goto out;
  1375. }
  1376. out:
  1377. return r;
  1378. }
  1379. /**
  1380. * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
  1381. * @ioc: per adapter object
  1382. * @handle: enclosure handle (assigned by firmware)
  1383. * Context: Calling function should acquire ioc->sas_device_lock
  1384. *
  1385. * This searches for enclosure device based on handle, then returns the
  1386. * enclosure object.
  1387. */
  1388. static struct _enclosure_node *
  1389. mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1390. {
  1391. struct _enclosure_node *enclosure_dev, *r;
  1392. r = NULL;
  1393. list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
  1394. if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
  1395. continue;
  1396. r = enclosure_dev;
  1397. goto out;
  1398. }
  1399. out:
  1400. return r;
  1401. }
  1402. /**
  1403. * mpt3sas_scsih_expander_find_by_sas_address - expander device search
  1404. * @ioc: per adapter object
  1405. * @sas_address: sas address
  1406. * @port: hba port entry
  1407. * Context: Calling function should acquire ioc->sas_node_lock.
  1408. *
  1409. * This searches for expander device based on sas_address & port number,
  1410. * then returns the sas_node object.
  1411. */
  1412. struct _sas_node *
  1413. mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
  1414. u64 sas_address, struct hba_port *port)
  1415. {
  1416. struct _sas_node *sas_expander, *r = NULL;
  1417. if (!port)
  1418. return r;
  1419. list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
  1420. if (sas_expander->sas_address != sas_address)
  1421. continue;
  1422. if (sas_expander->port != port)
  1423. continue;
  1424. r = sas_expander;
  1425. goto out;
  1426. }
  1427. out:
  1428. return r;
  1429. }
/**
 * _scsih_expander_node_add - insert expander device to the list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 * Context: This function will acquire ioc->sas_node_lock.
 *
 * Adding new object to the ioc->sas_expander_list.
 */
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	unsigned long flags;

	/* sas_expander_list is protected by sas_node_lock. */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
  1447. /**
  1448. * _scsih_is_end_device - determines if device is an end device
  1449. * @device_info: bitfield providing information about the device.
  1450. * Context: none
  1451. *
  1452. * Return: 1 if end device.
  1453. */
  1454. static int
  1455. _scsih_is_end_device(u32 device_info)
  1456. {
  1457. if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
  1458. ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
  1459. (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
  1460. (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
  1461. return 1;
  1462. else
  1463. return 0;
  1464. }
  1465. /**
  1466. * _scsih_is_nvme_pciescsi_device - determines if
  1467. * device is an pcie nvme/scsi device
  1468. * @device_info: bitfield providing information about the device.
  1469. * Context: none
  1470. *
  1471. * Returns 1 if device is pcie device type nvme/scsi.
  1472. */
  1473. static int
  1474. _scsih_is_nvme_pciescsi_device(u32 device_info)
  1475. {
  1476. if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
  1477. == MPI26_PCIE_DEVINFO_NVME) ||
  1478. ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
  1479. == MPI26_PCIE_DEVINFO_SCSI))
  1480. return 1;
  1481. else
  1482. return 0;
  1483. }
  1484. /**
  1485. * _scsih_scsi_lookup_find_by_target - search for matching channel:id
  1486. * @ioc: per adapter object
  1487. * @id: target id
  1488. * @channel: channel
  1489. * Context: This function will acquire ioc->scsi_lookup_lock.
  1490. *
  1491. * This will search for a matching channel:id in the scsi_lookup array,
  1492. * returning 1 if found.
  1493. */
  1494. static u8
  1495. _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
  1496. int channel)
  1497. {
  1498. int smid;
  1499. struct scsi_cmnd *scmd;
  1500. for (smid = 1;
  1501. smid <= ioc->shost->can_queue; smid++) {
  1502. scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  1503. if (!scmd)
  1504. continue;
  1505. if (scmd->device->id == id &&
  1506. scmd->device->channel == channel)
  1507. return 1;
  1508. }
  1509. return 0;
  1510. }
  1511. /**
  1512. * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
  1513. * @ioc: per adapter object
  1514. * @id: target id
  1515. * @lun: lun number
  1516. * @channel: channel
  1517. * Context: This function will acquire ioc->scsi_lookup_lock.
  1518. *
  1519. * This will search for a matching channel:id:lun in the scsi_lookup array,
  1520. * returning 1 if found.
  1521. */
  1522. static u8
  1523. _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
  1524. unsigned int lun, int channel)
  1525. {
  1526. int smid;
  1527. struct scsi_cmnd *scmd;
  1528. for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
  1529. scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  1530. if (!scmd)
  1531. continue;
  1532. if (scmd->device->id == id &&
  1533. scmd->device->channel == channel &&
  1534. scmd->device->lun == lun)
  1535. return 1;
  1536. }
  1537. return 0;
  1538. }
/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	/* smid is 1-based; block-layer tags are 0-based. */
	u16 tag = smid - 1;

	/* Only SCSI IO smids map to block-layer tags; internal ones don't. */
	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* Rebuild the blk-mq unique tag: (hw queue << bits) | tag. */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle field must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* Reject commands not currently owned by this driver. */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}
/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Caps the requested depth at the host queue depth, and additionally at
 * MPT3SAS_SATA_QUEUE_DEPTH for SATA end devices (unless enable_sdev_max_qd
 * is set or the controller is gen3.5).
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not SATA end devices; skip the SATA cap. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
		/* Balances the get done by __mpt3sas_get_sdev_from_target(). */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:
	/* Untagged devices can only have a single outstanding command. */
	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
		    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
		    sdev->queue_depth, sdev->tagged_supported,
		    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}
  1628. /**
  1629. * mpt3sas_scsih_change_queue_depth - setting device queue depth
  1630. * @sdev: scsi device struct
  1631. * @qdepth: requested queue depth
  1632. *
  1633. * Returns nothing.
  1634. */
  1635. void
  1636. mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
  1637. {
  1638. struct Scsi_Host *shost = sdev->host;
  1639. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  1640. if (ioc->enable_sdev_max_qd)
  1641. qdepth = shost->can_queue;
  1642. scsih_change_queue_depth(sdev, qdepth);
  1643. }
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates per-target private data and binds it to the matching
 * internal device object (RAID volume, PCIe device, or SAS/SATA
 * device) based on the target's channel.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
							    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
							      starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			/*
			 * The reference taken by the lookup is kept in
			 * pcie_dev; it is dropped in scsih_target_destroy().
			 */
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		/*
		 * The reference taken by the lookup is kept in sas_dev;
		 * it is dropped in scsih_target_destroy().
		 */
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		/* Hidden RAID components must not be exposed to upper layers. */
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Unbinds the target from its internal device object and frees the
 * per-target private data allocated in scsih_target_alloc().
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
							    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
							sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Two puts: one balances the lookup above, the
			 * other drops the reference stored in pcie_dev.
			 * Corresponding get() is in _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Two puts: one balances the lookup above, the other
		 * drops the reference stored in sas_dev.
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		sas_device_put(sas_device);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}
/**
 * scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Allocates per-LUN private data and links this sdev back to the
 * matching internal device object.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	/*
	 * NOTE(review): starget->hostdata is assumed non-NULL here because
	 * scsih_target_alloc() runs first for this target — confirm.
	 */
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* Hidden RAID components get no upper-level driver attached. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
				starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		/* Balances the get in __mpt3sas_get_pdev_by_wwid(). */
		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
					sas_target_priv_data->sas_address,
					sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		/* Balances the get in __mpt3sas_get_sdev_by_addr(). */
		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}
/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 *
 * Unlinks this sdev from the internal device object when it was the
 * last LUN on the target, then frees the per-LUN private data.
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
							sas_target_priv_data);
		/* Only clear the backlink when the last LUN goes away. */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		/* Balances the get in __mpt3sas_get_pdev_from_target(). */
		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc,
							sas_target_priv_data);
		/* Only clear the backlink when the last LUN goes away. */
		if (sas_device && !sas_target_priv_data->num_luns)
			sas_device->starget = NULL;

		/* Balances the get in __mpt3sas_get_sdev_from_target(). */
		if (sas_device)
			sas_device_put(sas_device);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}
/**
 * _scsih_display_sata_capabilities - sata capabilities
 * @ioc: per adapter object
 * @handle: device handle
 * @sdev: scsi device struct
 *
 * Reads SAS Device Page 0 for @handle and logs the SATA feature bits
 * (ATAPI, NCQ, async notify, SMART, FUA, software preserve).
 */
static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
	u16 handle, struct scsi_device *sdev)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u16 flags;
	u32 device_info;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* The config request can succeed while firmware reports an error. */
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	flags = le16_to_cpu(sas_device_pg0.Flags);
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);

	sdev_printk(KERN_INFO, sdev,
		    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
		    "sw_preserve(%s)\n",
		    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
		    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
		    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
		    "n",
		    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
		    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
		    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
}
/*
 * raid transport support -
 * Enabled for SLES11 and newer; in older kernels the driver will panic when
 * unloading the driver followed by a load - I believe that the subroutine
 * raid_class_release() is not cleaning up properly.
 */
  1957. /**
  1958. * scsih_is_raid - return boolean indicating device is raid volume
  1959. * @dev: the device struct object
  1960. */
  1961. static int
  1962. scsih_is_raid(struct device *dev)
  1963. {
  1964. struct scsi_device *sdev = to_scsi_device(dev);
  1965. struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
  1966. if (ioc->is_warpdrive)
  1967. return 0;
  1968. return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
  1969. }
/**
 * scsih_is_nvme - return boolean indicating device is an NVMe device
 * @dev: the device struct object
 *
 * Returns 1 when the scsi_device sits on the PCIe (NVMe) channel,
 * otherwise 0.
 */
static int
scsih_is_nvme(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
}
  1976. /**
  1977. * scsih_get_resync - get raid volume resync percent complete
  1978. * @dev: the device struct object
  1979. */
  1980. static void
  1981. scsih_get_resync(struct device *dev)
  1982. {
  1983. struct scsi_device *sdev = to_scsi_device(dev);
  1984. struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
  1985. static struct _raid_device *raid_device;
  1986. unsigned long flags;
  1987. Mpi2RaidVolPage0_t vol_pg0;
  1988. Mpi2ConfigReply_t mpi_reply;
  1989. u32 volume_status_flags;
  1990. u8 percent_complete;
  1991. u16 handle;
  1992. percent_complete = 0;
  1993. handle = 0;
  1994. if (ioc->is_warpdrive)
  1995. goto out;
  1996. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1997. raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
  1998. sdev->channel);
  1999. if (raid_device) {
  2000. handle = raid_device->handle;
  2001. percent_complete = raid_device->percent_complete;
  2002. }
  2003. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  2004. if (!handle)
  2005. goto out;
  2006. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
  2007. MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  2008. sizeof(Mpi2RaidVolPage0_t))) {
  2009. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  2010. __FILE__, __LINE__, __func__);
  2011. percent_complete = 0;
  2012. goto out;
  2013. }
  2014. volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
  2015. if (!(volume_status_flags &
  2016. MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
  2017. percent_complete = 0;
  2018. out:
  2019. switch (ioc->hba_mpi_version_belonged) {
  2020. case MPI2_VERSION:
  2021. raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
  2022. break;
  2023. case MPI25_VERSION:
  2024. case MPI26_VERSION:
  2025. raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
  2026. break;
  2027. }
  2028. }
  2029. /**
  2030. * scsih_get_state - get raid volume level
  2031. * @dev: the device struct object
  2032. */
  2033. static void
  2034. scsih_get_state(struct device *dev)
  2035. {
  2036. struct scsi_device *sdev = to_scsi_device(dev);
  2037. struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
  2038. static struct _raid_device *raid_device;
  2039. unsigned long flags;
  2040. Mpi2RaidVolPage0_t vol_pg0;
  2041. Mpi2ConfigReply_t mpi_reply;
  2042. u32 volstate;
  2043. enum raid_state state = RAID_STATE_UNKNOWN;
  2044. u16 handle = 0;
  2045. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  2046. raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
  2047. sdev->channel);
  2048. if (raid_device)
  2049. handle = raid_device->handle;
  2050. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  2051. if (!raid_device)
  2052. goto out;
  2053. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
  2054. MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  2055. sizeof(Mpi2RaidVolPage0_t))) {
  2056. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  2057. __FILE__, __LINE__, __func__);
  2058. goto out;
  2059. }
  2060. volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
  2061. if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
  2062. state = RAID_STATE_RESYNCING;
  2063. goto out;
  2064. }
  2065. switch (vol_pg0.VolumeState) {
  2066. case MPI2_RAID_VOL_STATE_OPTIMAL:
  2067. case MPI2_RAID_VOL_STATE_ONLINE:
  2068. state = RAID_STATE_ACTIVE;
  2069. break;
  2070. case MPI2_RAID_VOL_STATE_DEGRADED:
  2071. state = RAID_STATE_DEGRADED;
  2072. break;
  2073. case MPI2_RAID_VOL_STATE_FAILED:
  2074. case MPI2_RAID_VOL_STATE_MISSING:
  2075. state = RAID_STATE_OFFLINE;
  2076. break;
  2077. }
  2078. out:
  2079. switch (ioc->hba_mpi_version_belonged) {
  2080. case MPI2_VERSION:
  2081. raid_set_state(mpt2sas_raid_template, dev, state);
  2082. break;
  2083. case MPI25_VERSION:
  2084. case MPI26_VERSION:
  2085. raid_set_state(mpt3sas_raid_template, dev, state);
  2086. break;
  2087. }
  2088. }
  2089. /**
  2090. * _scsih_set_level - set raid level
  2091. * @ioc: ?
  2092. * @sdev: scsi device struct
  2093. * @volume_type: volume type
  2094. */
  2095. static void
  2096. _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
  2097. struct scsi_device *sdev, u8 volume_type)
  2098. {
  2099. enum raid_level level = RAID_LEVEL_UNKNOWN;
  2100. switch (volume_type) {
  2101. case MPI2_RAID_VOL_TYPE_RAID0:
  2102. level = RAID_LEVEL_0;
  2103. break;
  2104. case MPI2_RAID_VOL_TYPE_RAID10:
  2105. level = RAID_LEVEL_10;
  2106. break;
  2107. case MPI2_RAID_VOL_TYPE_RAID1E:
  2108. level = RAID_LEVEL_1E;
  2109. break;
  2110. case MPI2_RAID_VOL_TYPE_RAID1:
  2111. level = RAID_LEVEL_1;
  2112. break;
  2113. }
  2114. switch (ioc->hba_mpi_version_belonged) {
  2115. case MPI2_VERSION:
  2116. raid_set_level(mpt2sas_raid_template,
  2117. &sdev->sdev_gendev, level);
  2118. break;
  2119. case MPI25_VERSION:
  2120. case MPI26_VERSION:
  2121. raid_set_level(mpt3sas_raid_template,
  2122. &sdev->sdev_gendev, level);
  2123. break;
  2124. }
  2125. }
  2126. /**
  2127. * _scsih_get_volume_capabilities - volume capabilities
  2128. * @ioc: per adapter object
  2129. * @raid_device: the raid_device object
  2130. *
  2131. * Return: 0 for success, else 1
  2132. */
  2133. static int
  2134. _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
  2135. struct _raid_device *raid_device)
  2136. {
  2137. Mpi2RaidVolPage0_t *vol_pg0;
  2138. Mpi2RaidPhysDiskPage0_t pd_pg0;
  2139. Mpi2SasDevicePage0_t sas_device_pg0;
  2140. Mpi2ConfigReply_t mpi_reply;
  2141. u16 sz;
  2142. u8 num_pds;
  2143. if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
  2144. &num_pds)) || !num_pds) {
  2145. dfailprintk(ioc,
  2146. ioc_warn(ioc, "failure at %s:%d/%s()!\n",
  2147. __FILE__, __LINE__, __func__));
  2148. return 1;
  2149. }
  2150. raid_device->num_pds = num_pds;
  2151. sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
  2152. sizeof(Mpi2RaidVol0PhysDisk_t));
  2153. vol_pg0 = kzalloc(sz, GFP_KERNEL);
  2154. if (!vol_pg0) {
  2155. dfailprintk(ioc,
  2156. ioc_warn(ioc, "failure at %s:%d/%s()!\n",
  2157. __FILE__, __LINE__, __func__));
  2158. return 1;
  2159. }
  2160. if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
  2161. MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
  2162. dfailprintk(ioc,
  2163. ioc_warn(ioc, "failure at %s:%d/%s()!\n",
  2164. __FILE__, __LINE__, __func__));
  2165. kfree(vol_pg0);
  2166. return 1;
  2167. }
  2168. raid_device->volume_type = vol_pg0->VolumeType;
  2169. /* figure out what the underlying devices are by
  2170. * obtaining the device_info bits for the 1st device
  2171. */
  2172. if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
  2173. &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
  2174. vol_pg0->PhysDisk[0].PhysDiskNum))) {
  2175. if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  2176. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
  2177. le16_to_cpu(pd_pg0.DevHandle)))) {
  2178. raid_device->device_info =
  2179. le32_to_cpu(sas_device_pg0.DeviceInfo);
  2180. }
  2181. }
  2182. kfree(vol_pg0);
  2183. return 0;
  2184. }
  2185. /**
  2186. * _scsih_enable_tlr - setting TLR flags
  2187. * @ioc: per adapter object
  2188. * @sdev: scsi device struct
  2189. *
  2190. * Enabling Transaction Layer Retries for tape devices when
  2191. * vpd page 0x90 is present
  2192. *
  2193. */
  2194. static void
  2195. _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
  2196. {
  2197. /* only for TAPE */
  2198. if (sdev->type != TYPE_TAPE)
  2199. return;
  2200. if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
  2201. return;
  2202. sas_enable_tlr(sdev);
  2203. sdev_printk(KERN_INFO, sdev, "TLR %s\n",
  2204. sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
  2205. return;
  2206. }
/**
 * scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Configures queue depth and reports device capabilities for RAID
 * volumes, RAID components, NVMe (PCIe) devices and plain SAS/SATA
 * devices.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";
	u16 handle, volume_handle = 0;
	u64 volume_wwid = 0;

	/* conservative default until the device type is known */
	qdepth = 1;
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	handle = sas_target_priv_data->handle;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		/*
		 * WARPDRIVE: Initialize the required data for Direct IO
		 */
		mpt3sas_init_warpdrive_properties(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		/* map the volume type to a display string and queue depth */
		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			/* OEM-specific: display an even-disk-count RAID1E
			 * volume as RAID10 when manufacturing page 10
			 * requests it.
			 */
			if (ioc->manu_pg10.OEMIdentifier &&
			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
			    MFG10_GF0_R10_DISPLAY) &&
			    !(raid_device->num_pds % 2))
				r_level = "RAID10";
			else
				r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		if (!ioc->hide_ir_msg)
			sdev_printk(KERN_INFO, sdev,
			    "%s: handle(0x%04x), wwid(0x%016llx),"
			    " pd_count(%d), type(%s)\n",
			    r_level, raid_device->handle,
			    (unsigned long long)raid_device->wwid,
			    raid_device->num_pds, ds);

		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    MPT3SAS_RAID_MAX_SECTORS);
			sdev_printk(KERN_INFO, sdev,
			    "Set queue's max_sector to: %u\n",
			    MPT3SAS_RAID_MAX_SECTORS);
		}

		mpt3sas_scsih_change_queue_depth(sdev, qdepth);

		/* raid transport support */
		if (!ioc->is_warpdrive)
			_scsih_set_level(ioc, sdev, raid_device->volume_type);
		return 0;
	}

	/* non-raid handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
		/* a hidden raid component needs its parent volume's
		 * handle and wwid recorded on the sas_device below
		 */
		if (mpt3sas_config_get_volume_handle(ioc, handle,
		    &volume_handle)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
		    volume_handle, &volume_wwid)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
	}

	/* PCIe handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_device_priv_data->sas_target->sas_address);
		if (!pcie_device) {
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		qdepth = ioc->max_nvme_qd;
		ds = "NVMe";
		sdev_printk(KERN_INFO, sdev,
			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			ds, handle, (unsigned long long)pcie_device->wwid,
			pcie_device->port_num);
		if (pcie_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
			ds,
			(unsigned long long)pcie_device->enclosure_logical_id,
			pcie_device->slot);
		if (pcie_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
				"%s: enclosure level(0x%04x),"
				"connector name( %s)\n", ds,
				pcie_device->enclosure_level,
				pcie_device->connector_name);

		/* cap transfer size to the device's advertised MDTS
		 * (maximum data transfer size), in 512-byte sectors
		 */
		if (pcie_device->nvme_mdts)
			blk_queue_max_hw_sectors(sdev->request_queue,
					pcie_device->nvme_mdts/512);

		pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
		 ** merged and can eliminate holes created during merging
		 ** operation.
		 **/
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
				sdev->request_queue);
		blk_queue_virt_boundary(sdev->request_queue,
				ioc->page_size - 1);
		return 0;
	}

	/* plain SAS/SATA device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	   sas_device_priv_data->sas_target->sas_address,
	   sas_device_priv_data->sas_target->port);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	sas_device->volume_handle = volume_handle;
	sas_device->volume_wwid = volume_wwid;
	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
		qdepth = (sas_device->port_type > 1) ?
			ioc->max_wideport_qd : ioc->max_narrowport_qd;
		ssp_target = 1;
		if (sas_device->device_info &
				MPI2_SAS_DEVICE_INFO_SEP) {
			/* enclosure service devices must not be delayed
			 * on removal
			 */
			sdev_printk(KERN_WARNING, sdev,
			"set ignore_delay_remove for handle(0x%04x)\n",
			sas_device_priv_data->sas_target->handle);
			sas_device_priv_data->ignore_delay_remove = 1;
			ds = "SES";
		} else
			ds = "SSP";
	} else {
		qdepth = ioc->max_sata_qd;
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
			ds = "STP";
		else if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			ds = "SATA";
	}

	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);

	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);

	sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!ssp_target)
		_scsih_display_sata_capabilities(ioc, handle, sdev);


	mpt3sas_scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target) {
		sas_read_port_mode_page(sdev);
		_scsih_enable_tlr(ioc, sdev);
	}

	return 0;
}
  2436. /**
  2437. * scsih_bios_param - fetch head, sector, cylinder info for a disk
  2438. * @sdev: scsi device struct
  2439. * @bdev: pointer to block device context
  2440. * @capacity: device size (in 512 byte sectors)
  2441. * @params: three element array to place output:
  2442. * params[0] number of heads (max 255)
  2443. * params[1] number of sectors (max 63)
  2444. * params[2] number of cylinders
  2445. */
  2446. static int
  2447. scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
  2448. sector_t capacity, int params[])
  2449. {
  2450. int heads;
  2451. int sectors;
  2452. sector_t cylinders;
  2453. ulong dummy;
  2454. heads = 64;
  2455. sectors = 32;
  2456. dummy = heads * sectors;
  2457. cylinders = capacity;
  2458. sector_div(cylinders, dummy);
  2459. /*
  2460. * Handle extended translation size for logical drives
  2461. * > 1Gb
  2462. */
  2463. if ((ulong)capacity >= 0x200000) {
  2464. heads = 255;
  2465. sectors = 63;
  2466. dummy = heads * sectors;
  2467. cylinders = capacity;
  2468. sector_div(cylinders, dummy);
  2469. }
  2470. /* return result */
  2471. params[0] = heads;
  2472. params[1] = sectors;
  2473. params[2] = cylinders;
  2474. return 0;
  2475. }
  2476. /**
  2477. * _scsih_response_code - translation of device response code
  2478. * @ioc: per adapter object
  2479. * @response_code: response code returned by the device
  2480. */
  2481. static void
  2482. _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
  2483. {
  2484. char *desc;
  2485. switch (response_code) {
  2486. case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
  2487. desc = "task management request completed";
  2488. break;
  2489. case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
  2490. desc = "invalid frame";
  2491. break;
  2492. case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
  2493. desc = "task management request not supported";
  2494. break;
  2495. case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
  2496. desc = "task management request failed";
  2497. break;
  2498. case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
  2499. desc = "task management request succeeded";
  2500. break;
  2501. case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
  2502. desc = "invalid lun";
  2503. break;
  2504. case 0xA:
  2505. desc = "overlapped tag attempted";
  2506. break;
  2507. case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
  2508. desc = "task queued, however not sent to target";
  2509. break;
  2510. default:
  2511. desc = "unknown";
  2512. break;
  2513. }
  2514. ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
  2515. }
  2516. /**
  2517. * _scsih_tm_done - tm completion routine
  2518. * @ioc: per adapter object
  2519. * @smid: system request message index
  2520. * @msix_index: MSIX table index supplied by the OS
  2521. * @reply: reply message frame(lower 32bit addr)
  2522. * Context: none.
  2523. *
  2524. * The callback handler when using scsih_issue_tm.
  2525. *
  2526. * Return: 1 meaning mf should be freed from _base_interrupt
  2527. * 0 means the mf is freed from this function.
  2528. */
  2529. static u8
  2530. _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  2531. {
  2532. MPI2DefaultReply_t *mpi_reply;
  2533. if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
  2534. return 1;
  2535. if (ioc->tm_cmds.smid != smid)
  2536. return 1;
  2537. ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
  2538. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  2539. if (mpi_reply) {
  2540. memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
  2541. ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
  2542. }
  2543. ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
  2544. complete(&ioc->tm_cmds.done);
  2545. return 1;
  2546. }
  2547. /**
  2548. * mpt3sas_scsih_set_tm_flag - set per target tm_busy
  2549. * @ioc: per adapter object
  2550. * @handle: device handle
  2551. *
  2552. * During taskmangement request, we need to freeze the device queue.
  2553. */
  2554. void
  2555. mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  2556. {
  2557. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2558. struct scsi_device *sdev;
  2559. u8 skip = 0;
  2560. shost_for_each_device(sdev, ioc->shost) {
  2561. if (skip)
  2562. continue;
  2563. sas_device_priv_data = sdev->hostdata;
  2564. if (!sas_device_priv_data)
  2565. continue;
  2566. if (sas_device_priv_data->sas_target->handle == handle) {
  2567. sas_device_priv_data->sas_target->tm_busy = 1;
  2568. skip = 1;
  2569. ioc->ignore_loginfos = 1;
  2570. }
  2571. }
  2572. }
  2573. /**
  2574. * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
  2575. * @ioc: per adapter object
  2576. * @handle: device handle
  2577. *
  2578. * During taskmangement request, we need to freeze the device queue.
  2579. */
  2580. void
  2581. mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  2582. {
  2583. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2584. struct scsi_device *sdev;
  2585. u8 skip = 0;
  2586. shost_for_each_device(sdev, ioc->shost) {
  2587. if (skip)
  2588. continue;
  2589. sas_device_priv_data = sdev->hostdata;
  2590. if (!sas_device_priv_data)
  2591. continue;
  2592. if (sas_device_priv_data->sas_target->handle == handle) {
  2593. sas_device_priv_data->sas_target->tm_busy = 0;
  2594. skip = 1;
  2595. ioc->ignore_loginfos = 0;
  2596. }
  2597. }
  2598. }
  2599. /**
  2600. * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
  2601. * @ioc: per adapter object
  2602. * @channel: the channel assigned by the OS
  2603. * @id: the id assigned by the OS
  2604. * @lun: lun number
  2605. * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  2606. * @smid_task: smid assigned to the task
  2607. *
  2608. * Look whether TM has aborted the timed out SCSI command, if
  2609. * TM has aborted the IO then return SUCCESS else return FAILED.
  2610. */
  2611. static int
  2612. scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
  2613. uint id, uint lun, u8 type, u16 smid_task)
  2614. {
  2615. if (smid_task <= ioc->shost->can_queue) {
  2616. switch (type) {
  2617. case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
  2618. if (!(_scsih_scsi_lookup_find_by_target(ioc,
  2619. id, channel)))
  2620. return SUCCESS;
  2621. break;
  2622. case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
  2623. case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
  2624. if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
  2625. lun, channel)))
  2626. return SUCCESS;
  2627. break;
  2628. default:
  2629. return SUCCESS;
  2630. }
  2631. } else if (smid_task == ioc->scsih_cmds.smid) {
  2632. if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
  2633. (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
  2634. return SUCCESS;
  2635. } else if (smid_task == ioc->ctl_cmds.smid) {
  2636. if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
  2637. (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
  2638. return SUCCESS;
  2639. }
  2640. return FAILED;
  2641. }
  2642. /**
  2643. * scsih_tm_post_processing - post processing of target & LUN reset
  2644. * @ioc: per adapter object
  2645. * @handle: device handle
  2646. * @channel: the channel assigned by the OS
  2647. * @id: the id assigned by the OS
  2648. * @lun: lun number
  2649. * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  2650. * @smid_task: smid assigned to the task
  2651. *
  2652. * Post processing of target & LUN reset. Due to interrupt latency
  2653. * issue it possible that interrupt for aborted IO might not be
  2654. * received yet. So before returning failure status, poll the
  2655. * reply descriptor pools for the reply of timed out SCSI command.
  2656. * Return FAILED status if reply for timed out is not received
  2657. * otherwise return SUCCESS.
  2658. */
  2659. static int
  2660. scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
  2661. uint channel, uint id, uint lun, u8 type, u16 smid_task)
  2662. {
  2663. int rc;
  2664. rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
  2665. if (rc == SUCCESS)
  2666. return rc;
  2667. ioc_info(ioc,
  2668. "Poll ReplyDescriptor queues for completion of"
  2669. " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
  2670. smid_task, type, handle);
  2671. /*
  2672. * Due to interrupt latency issues, driver may receive interrupt for
  2673. * TM first and then for aborted SCSI IO command. So, poll all the
  2674. * ReplyDescriptor pools before returning the FAILED status to SML.
  2675. */
  2676. mpt3sas_base_mask_interrupts(ioc);
  2677. mpt3sas_base_sync_reply_irqs(ioc, 1);
  2678. mpt3sas_base_unmask_interrupts(ioc);
  2679. return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
  2680. }
/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* caller must hold the tm mutex (see issue_locked_tm wrapper) */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* if the IOC is in a bad state, escalate to a hard reset instead
	 * of sending the TM
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go through the high-priority queue */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method only applies to abort/query task types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the device queue while the TM is outstanding */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* translate the TM outcome into SUCCESS/FAILED per task type */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle filed in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* unfreeze the device queue regardless of outcome */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
/**
 * mpt3sas_scsih_issue_locked_tm - serialized wrapper for issuing a TM request
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Takes ioc->tm_cmds.mutex around mpt3sas_scsih_issue_tm() so only one
 * task management request uses the single tm_cmds slot at a time.
 *
 * Return: SUCCESS or FAILED (whatever mpt3sas_scsih_issue_tm() returns).
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}
/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.  Prints the command and
 * identifying information (handle, wwid/sas_address, enclosure data) for
 * the target the command was issued to, distinguishing RAID volumes,
 * NVMe/PCIe devices and plain SAS end devices.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive controllers hide IR messages; relabel volumes there */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
		    "%s handle(0x%04x), %s wwid(0x%016llx)\n",
		    device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* lookup under pcie_device_lock; __get takes a reference */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    pcie_device->handle,
			    (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    pcie_device->enclosure_logical_id,
				    pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
				    "enclosure level(0x%04x), connector name( %s)\n",
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	} else {
		/* plain SAS end device (or hidden RAID component) */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				    (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);
			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);
			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	/* NOTE(review): adjacent literals concatenate with no space between
	 * "abort!" and "scmd" - cosmetic log glitch, left as-is here. */
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* device is gone; complete the command so EH succeeds */
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	/* NVMe devices (non-SCSI PCIe) use the dedicated abort timeout */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the logical unit was reset else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* device is gone; complete the command so EH succeeds */
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* non-SCSI PCIe (NVMe) devices get a protocol-level reset and a
	 * per-device timeout; everything else uses a link reset */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the target was reset else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* target is gone; complete the command so EH succeeds */
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* non-SCSI PCIe (NVMe) devices get a protocol-level reset and a
	 * per-device timeout; everything else uses a link reset */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* lun is 0: a target reset applies to the whole target */
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
  3128. /**
  3129. * scsih_host_reset - eh threads main host reset routine
  3130. * @scmd: pointer to scsi command object
  3131. *
  3132. * Return: SUCCESS if command aborted else FAILED
  3133. */
  3134. static int
  3135. scsih_host_reset(struct scsi_cmnd *scmd)
  3136. {
  3137. struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
  3138. int r, retval;
  3139. ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
  3140. scsi_print_command(scmd);
  3141. if (ioc->is_driver_loading || ioc->remove_host) {
  3142. ioc_info(ioc, "Blocking the host reset\n");
  3143. r = FAILED;
  3144. goto out;
  3145. }
  3146. retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  3147. r = (retval < 0) ? FAILED : SUCCESS;
  3148. out:
  3149. ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
  3150. r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
  3151. return r;
  3152. }
/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 *
 * Two references are taken: one owned by ioc->fw_event_list and one owned
 * by the queued work item; each is dropped by the corresponding consumer.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* no worker thread - nothing can consume the event */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	fw_event_work_get(fw_event);	/* reference held by fw_event_list */
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	fw_event_work_get(fw_event);	/* reference held by the work item */
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
  3177. /**
  3178. * _scsih_fw_event_del_from_list - delete fw_event from the list
  3179. * @ioc: per adapter object
  3180. * @fw_event: object describing the event
  3181. * Context: This function will acquire ioc->fw_event_lock.
  3182. *
  3183. * If the fw_event is on the fw_event_list, remove it and do a put.
  3184. */
  3185. static void
  3186. _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
  3187. *fw_event)
  3188. {
  3189. unsigned long flags;
  3190. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  3191. if (!list_empty(&fw_event->list)) {
  3192. list_del_init(&fw_event->list);
  3193. fw_event_work_put(fw_event);
  3194. }
  3195. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  3196. }
  3197. /**
  3198. * mpt3sas_send_trigger_data_event - send event for processing trigger data
  3199. * @ioc: per adapter object
  3200. * @event_data: trigger event data
  3201. */
  3202. void
  3203. mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
  3204. struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
  3205. {
  3206. struct fw_event_work *fw_event;
  3207. u16 sz;
  3208. if (ioc->is_driver_loading)
  3209. return;
  3210. sz = sizeof(*event_data);
  3211. fw_event = alloc_fw_event_work(sz);
  3212. if (!fw_event)
  3213. return;
  3214. fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
  3215. fw_event->ioc = ioc;
  3216. memcpy(fw_event->event_data, event_data, sizeof(*event_data));
  3217. _scsih_fw_event_add(ioc, fw_event);
  3218. fw_event_work_put(fw_event);
  3219. }
  3220. /**
  3221. * _scsih_error_recovery_delete_devices - remove devices not responding
  3222. * @ioc: per adapter object
  3223. */
  3224. static void
  3225. _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
  3226. {
  3227. struct fw_event_work *fw_event;
  3228. fw_event = alloc_fw_event_work(0);
  3229. if (!fw_event)
  3230. return;
  3231. fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
  3232. fw_event->ioc = ioc;
  3233. _scsih_fw_event_add(ioc, fw_event);
  3234. fw_event_work_put(fw_event);
  3235. }
  3236. /**
  3237. * mpt3sas_port_enable_complete - port enable completed (fake event)
  3238. * @ioc: per adapter object
  3239. */
  3240. void
  3241. mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
  3242. {
  3243. struct fw_event_work *fw_event;
  3244. fw_event = alloc_fw_event_work(0);
  3245. if (!fw_event)
  3246. return;
  3247. fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
  3248. fw_event->ioc = ioc;
  3249. _scsih_fw_event_add(ioc, fw_event);
  3250. fw_event_work_put(fw_event);
  3251. }
/*
 * dequeue_next_fw_event - pop the oldest event off ioc->fw_event_list.
 *
 * Unlinks the first entry under fw_event_lock and drops the reference the
 * list was holding.  The returned pointer stays valid because the queued
 * work item still holds its own reference (taken in _scsih_fw_event_add());
 * the caller must not put that reference itself unless it cancels the work.
 * Returns NULL when the list is empty.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
		    struct fw_event_work, list);
		list_del_init(&fw_event->list);
		fw_event_work_put(fw_event);	/* list's reference */
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}
/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Context: task, can sleep
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;
	/*
	 * Set current running event as ignore, so that
	 * current running event will exit quickly.
	 * As diag reset has occurred it is of no use
	 * to process remaining stale event data entries.
	 */
	if (ioc->shost_recovery && ioc->current_event)
		ioc->current_event->ignore = 1;

	ioc->fw_events_cleanup = 1;
	/* drain the queue first, then deal with the in-flight event */
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	    (fw_event = ioc->current_event)) {

		/*
		 * Don't call cancel_work_sync() for current_event
		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
		 * otherwise we may observe deadlock if current
		 * hard reset issued as part of processing the current_event.
		 *
		 * Original logic of cleaning the current_event is added
		 * for handling the back to back host reset issued by the user.
		 * i.e. during back to back host reset, driver use to process
		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
		 * event back to back and this made the drives to unregister
		 * the devices from SML.
		 */

		if (fw_event == ioc->current_event &&
		    ioc->current_event->event !=
		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
			ioc->current_event = NULL;
			continue;
		}

		/*
		 * Driver has to clear ioc->start_scan flag when
		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
		 * otherwise scsi_scan_host() API waits for the
		 * 5 minute timer to expire. If we exit from
		 * scsi_scan_host() early then we can issue the
		 * new port enable request as part of current diag reset.
		 */
		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			ioc->start_scan = 0;
		}

		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

	}
	ioc->fw_events_cleanup = 0;
}
  3337. /**
  3338. * _scsih_internal_device_block - block the sdev device
  3339. * @sdev: per device object
  3340. * @sas_device_priv_data : per device driver private data
  3341. *
  3342. * make sure device is blocked without error, if not
  3343. * print an error
  3344. */
  3345. static void
  3346. _scsih_internal_device_block(struct scsi_device *sdev,
  3347. struct MPT3SAS_DEVICE *sas_device_priv_data)
  3348. {
  3349. int r = 0;
  3350. sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
  3351. sas_device_priv_data->sas_target->handle);
  3352. sas_device_priv_data->block = 1;
  3353. r = scsi_internal_device_block_nowait(sdev);
  3354. if (r == -EINVAL)
  3355. sdev_printk(KERN_WARNING, sdev,
  3356. "device_block failed with return(%d) for handle(0x%04x)\n",
  3357. r, sas_device_priv_data->sas_target->handle);
  3358. }
/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 *
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */
static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call. We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		/* block -> unblock round trip to force the state machine
		 * through SDEV_BLOCK into SDEV_RUNNING */
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}
  3398. /**
  3399. * _scsih_ublock_io_all_device - unblock every device
  3400. * @ioc: per adapter object
  3401. *
  3402. * change the device state from block to running
  3403. */
  3404. static void
  3405. _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
  3406. {
  3407. struct MPT3SAS_DEVICE *sas_device_priv_data;
  3408. struct scsi_device *sdev;
  3409. shost_for_each_device(sdev, ioc->shost) {
  3410. sas_device_priv_data = sdev->hostdata;
  3411. if (!sas_device_priv_data)
  3412. continue;
  3413. if (!sas_device_priv_data->block)
  3414. continue;
  3415. dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
  3416. "device_running, handle(0x%04x)\n",
  3417. sas_device_priv_data->sas_target->handle));
  3418. _scsih_internal_device_unblock(sdev, sas_device_priv_data);
  3419. }
  3420. }
  3421. /**
  3422. * _scsih_ublock_io_device - prepare device to be deleted
  3423. * @ioc: per adapter object
  3424. * @sas_address: sas address
  3425. * @port: hba port entry
  3426. *
  3427. * unblock then put device in offline state
  3428. */
  3429. static void
  3430. _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
  3431. u64 sas_address, struct hba_port *port)
  3432. {
  3433. struct MPT3SAS_DEVICE *sas_device_priv_data;
  3434. struct scsi_device *sdev;
  3435. shost_for_each_device(sdev, ioc->shost) {
  3436. sas_device_priv_data = sdev->hostdata;
  3437. if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
  3438. continue;
  3439. if (sas_device_priv_data->sas_target->sas_address
  3440. != sas_address)
  3441. continue;
  3442. if (sas_device_priv_data->sas_target->port != port)
  3443. continue;
  3444. if (sas_device_priv_data->block)
  3445. _scsih_internal_device_unblock(sdev,
  3446. sas_device_priv_data);
  3447. }
  3448. }
  3449. /**
  3450. * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
  3451. * @ioc: per adapter object
  3452. *
  3453. * During device pull we need to appropriately set the sdev state.
  3454. */
  3455. static void
  3456. _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
  3457. {
  3458. struct MPT3SAS_DEVICE *sas_device_priv_data;
  3459. struct scsi_device *sdev;
  3460. shost_for_each_device(sdev, ioc->shost) {
  3461. sas_device_priv_data = sdev->hostdata;
  3462. if (!sas_device_priv_data)
  3463. continue;
  3464. if (sas_device_priv_data->block)
  3465. continue;
  3466. if (sas_device_priv_data->ignore_delay_remove) {
  3467. sdev_printk(KERN_INFO, sdev,
  3468. "%s skip device_block for SES handle(0x%04x)\n",
  3469. __func__, sas_device_priv_data->sas_target->handle);
  3470. continue;
  3471. }
  3472. _scsih_internal_device_block(sdev, sas_device_priv_data);
  3473. }
  3474. }
/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct _sas_device *sas_device;

	/* may be NULL (e.g. a PCIe device handle); takes a reference */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* rphy addition still pending - don't block it mid-add */
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}

	if (sas_device)
		sas_device_put(sas_device);
}
/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 *
 * End devices are recorded in ioc->blocking_handles (actual blocking is
 * deferred to the bitmap's consumer); child expanders are walked
 * recursively.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* first pass: mark every directly-attached end device */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
				    ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* second pass: recurse into attached expanders */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}
  3560. /**
  3561. * _scsih_block_io_to_children_attached_directly
  3562. * @ioc: per adapter object
  3563. * @event_data: topology change event data
  3564. *
  3565. * This routine set sdev state to SDEV_BLOCK for all devices
  3566. * direct attached during device pull.
  3567. */
  3568. static void
  3569. _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
  3570. Mpi2EventDataSasTopologyChangeList_t *event_data)
  3571. {
  3572. int i;
  3573. u16 handle;
  3574. u16 reason_code;
  3575. for (i = 0; i < event_data->NumEntries; i++) {
  3576. handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
  3577. if (!handle)
  3578. continue;
  3579. reason_code = event_data->PHY[i].PhyStatus &
  3580. MPI2_EVENT_SAS_TOPO_RC_MASK;
  3581. if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
  3582. _scsih_block_io_device(ioc, handle);
  3583. }
  3584. }
  3585. /**
  3586. * _scsih_block_io_to_pcie_children_attached_directly
  3587. * @ioc: per adapter object
  3588. * @event_data: topology change event data
  3589. *
  3590. * This routine set sdev state to SDEV_BLOCK for all devices
  3591. * direct attached during device pull/reconnect.
  3592. */
  3593. static void
  3594. _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
  3595. Mpi26EventDataPCIeTopologyChangeList_t *event_data)
  3596. {
  3597. int i;
  3598. u16 handle;
  3599. u16 reason_code;
  3600. for (i = 0; i < event_data->NumEntries; i++) {
  3601. handle =
  3602. le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
  3603. if (!handle)
  3604. continue;
  3605. reason_code = event_data->PortEntry[i].PortStatus;
  3606. if (reason_code ==
  3607. MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
  3608. _scsih_block_io_device(ioc, handle);
  3609. }
  3610. }
  3611. /**
  3612. * _scsih_tm_tr_send - send task management request
  3613. * @ioc: per adapter object
  3614. * @handle: device handle
  3615. * Context: interrupt time.
  3616. *
  3617. * This code is to initiate the device removal handshake protocol
  3618. * with controller firmware. This function will issue target reset
  3619. * using high priority request queue. It will send a sas iounit
  3620. * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
  3621. *
  3622. * This is designed to send muliple task management request at the same
  3623. * time to the fifo. If the fifo is full, we will append the request,
  3624. * and process it in a future completion.
  3625. */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	/* Bail out early when the host cannot accept new requests. */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
			__func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
			__func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* Look the handle up as a SAS device first; under the lock mark
	 * the target deleted and remember the address/port needed to
	 * unblock its queued I/O below.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!sas_device) {
		/* Not a SAS device - try the PCIe (NVMe) device list. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
		    pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		/* PCIe devices without custom TM handling and that are not
		 * SCSI-over-PCIe get a protocol-level reset; otherwise fall
		 * back to a link reset.
		 */
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
		    ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
			handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
					(u64)sas_device->enclosure_logical_id,
					sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
					sas_device->enclosure_level,
					sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
					(u64)pcie_device->enclosure_logical_id,
					pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
				    ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name));
		}
		/* Unblock any previously blocked I/O so it can complete,
		 * then invalidate the cached firmware handle.
		 */
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* No free high-priority smid: queue the TR on delayed_tr_list and
	 * let a future completion (_scsih_check_for_pending_tm) resend it.
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
		    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
			handle));
		goto out;
	}

	dewtprintk(ioc,
	    ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		handle, smid, ioc->tm_tr_cb_idx));
	/* Build and fire the target reset on the high-priority queue;
	 * the handshake continues in the tm_tr_cb_idx completion.
	 */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

out:
	/* Drop the references taken by the __mpt3sas_get_*_by_handle()
	 * lookups above.
	 */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}
  3746. /**
  3747. * _scsih_tm_tr_complete -
  3748. * @ioc: per adapter object
  3749. * @smid: system request message index
  3750. * @msix_index: MSIX table index supplied by the OS
  3751. * @reply: reply message frame(lower 32bit addr)
  3752. * Context: interrupt time.
  3753. *
  3754. * This is the target reset completion routine.
  3755. * This code is part of the code to initiate the device removal
  3756. * handshake protocol with controller firmware.
  3757. * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
  3758. *
  3759. * Return: 1 meaning mf should be freed from _base_interrupt
  3760. * 0 means the mf is freed from this function.
  3761. */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* Abort the removal handshake when the host cannot take requests. */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host in pci error recovery\n",
			__func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host is not operational\n",
			__func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return 1;
	}

	/* Sanity check: the reply must match the request frame still held
	 * in this smid; otherwise treat it as a spurious interrupt.
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
		    ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
			handle,
			le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
	    ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
		handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
		le32_to_cpu(mpi_reply->IOCLogInfo),
		le32_to_cpu(mpi_reply->TerminationCount)));

	/* Next step of the handshake: send a SAS IO unit control
	 * (REMOVE_DEVICE).  If no free smid is available, queue it on
	 * delayed_sc_list for mpt3sas_check_for_pending_internal_cmds().
	 */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
		    ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
			handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
	    ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	/* Also feed any delayed target resets waiting for this smid. */
	return _scsih_check_for_pending_tm(ioc, smid);
}
  3831. /** _scsih_allow_scmd_to_device - check whether scmd needs to
  3832. * issue to IOC or not.
  3833. * @ioc: per adapter object
  3834. * @scmd: pointer to scsi command object
  3835. *
  3836. * Returns true if scmd can be issued to IOC otherwise returns false.
  3837. */
  3838. inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
  3839. struct scsi_cmnd *scmd)
  3840. {
  3841. if (ioc->pci_error_recovery)
  3842. return false;
  3843. if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
  3844. if (ioc->remove_host)
  3845. return false;
  3846. return true;
  3847. }
  3848. if (ioc->remove_host) {
  3849. switch (scmd->cmnd[0]) {
  3850. case SYNCHRONIZE_CACHE:
  3851. case START_STOP:
  3852. return true;
  3853. default:
  3854. return false;
  3855. }
  3856. }
  3857. return true;
  3858. }
  3859. /**
  3860. * _scsih_sas_control_complete - completion routine
  3861. * @ioc: per adapter object
  3862. * @smid: system request message index
  3863. * @msix_index: MSIX table index supplied by the OS
  3864. * @reply: reply message frame(lower 32bit addr)
  3865. * Context: interrupt time.
  3866. *
  3867. * This is the sas iounit control completion routine.
  3868. * This code is part of the code to initiate the device removal
  3869. * handshake protocol with controller firmware.
  3870. *
  3871. * Return: 1 meaning mf should be freed from _base_interrupt
  3872. * 0 means the mf is freed from this function.
  3873. */
  3874. static u8
  3875. _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  3876. u8 msix_index, u32 reply)
  3877. {
  3878. Mpi2SasIoUnitControlReply_t *mpi_reply =
  3879. mpt3sas_base_get_reply_virt_addr(ioc, reply);
  3880. if (likely(mpi_reply)) {
  3881. dewtprintk(ioc,
  3882. ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
  3883. le16_to_cpu(mpi_reply->DevHandle), smid,
  3884. le16_to_cpu(mpi_reply->IOCStatus),
  3885. le32_to_cpu(mpi_reply->IOCLogInfo)));
  3886. if (le16_to_cpu(mpi_reply->IOCStatus) ==
  3887. MPI2_IOCSTATUS_SUCCESS) {
  3888. clear_bit(le16_to_cpu(mpi_reply->DevHandle),
  3889. ioc->device_remove_in_progress);
  3890. }
  3891. } else {
  3892. ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
  3893. __FILE__, __LINE__, __func__);
  3894. }
  3895. return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
  3896. }
  3897. /**
  3898. * _scsih_tm_tr_volume_send - send target reset request for volumes
  3899. * @ioc: per adapter object
  3900. * @handle: device handle
  3901. * Context: interrupt time.
  3902. *
  3903. * This is designed to send muliple task management request at the same
  3904. * time to the fifo. If the fifo is full, we will append the request,
  3905. * and process it in a future completion.
  3906. */
  3907. static void
  3908. _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  3909. {
  3910. Mpi2SCSITaskManagementRequest_t *mpi_request;
  3911. u16 smid;
  3912. struct _tr_list *delayed_tr;
  3913. if (ioc->pci_error_recovery) {
  3914. dewtprintk(ioc,
  3915. ioc_info(ioc, "%s: host reset in progress!\n",
  3916. __func__));
  3917. return;
  3918. }
  3919. smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
  3920. if (!smid) {
  3921. delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
  3922. if (!delayed_tr)
  3923. return;
  3924. INIT_LIST_HEAD(&delayed_tr->list);
  3925. delayed_tr->handle = handle;
  3926. list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
  3927. dewtprintk(ioc,
  3928. ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
  3929. handle));
  3930. return;
  3931. }
  3932. dewtprintk(ioc,
  3933. ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
  3934. handle, smid, ioc->tm_tr_volume_cb_idx));
  3935. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  3936. memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
  3937. mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  3938. mpi_request->DevHandle = cpu_to_le16(handle);
  3939. mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  3940. ioc->put_smid_hi_priority(ioc, smid, 0);
  3941. }
  3942. /**
  3943. * _scsih_tm_volume_tr_complete - target reset completion
  3944. * @ioc: per adapter object
  3945. * @smid: system request message index
  3946. * @msix_index: MSIX table index supplied by the OS
  3947. * @reply: reply message frame(lower 32bit addr)
  3948. * Context: interrupt time.
  3949. *
  3950. * Return: 1 meaning mf should be freed from _base_interrupt
  3951. * 0 means the mf is freed from this function.
  3952. */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* Nothing further to do while the host is being reset. */
	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host reset in progress!\n",
			__func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return 1;
	}

	/* Sanity check: the reply's handle must match the request frame
	 * still held in this smid.
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
		    ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
			handle, le16_to_cpu(mpi_reply->DevHandle),
			smid));
		return 0;
	}

	dewtprintk(ioc,
	    ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
		handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
		le32_to_cpu(mpi_reply->IOCLogInfo),
		le32_to_cpu(mpi_reply->TerminationCount)));

	/* Feed the next delayed target reset, if any, using this smid. */
	return _scsih_check_for_pending_tm(ioc, smid);
}
  3988. /**
  3989. * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
  3990. * @ioc: per adapter object
  3991. * @smid: system request message index
  3992. * @event: Event ID
  3993. * @event_context: used to track events uniquely
  3994. *
  3995. * Context - processed in interrupt context.
  3996. */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
	U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	int i = smid - ioc->internal_smid;	/* index into internal_lookup[] */
	unsigned long flags;

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
	    ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
		le16_to_cpu(event), smid, ioc->base_cb_idx));

	/* 'event' and 'event_context' are already little-endian (note the
	 * le16_to_cpu above used only for printing), so they are copied
	 * into the frame without conversion.
	 */
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0; /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}
  4023. /**
  4024. * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
  4025. * sas_io_unit_ctrl messages
  4026. * @ioc: per adapter object
  4027. * @smid: system request message index
  4028. * @handle: device handle
  4029. *
  4030. * Context - processed in interrupt context.
  4031. */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	int i = smid - ioc->internal_smid;	/* index into internal_lookup[] */
	unsigned long flags;

	/* Drop the delayed request when the host cannot accept it. */
	if (ioc->remove_host) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host has been removed\n",
			__func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host in pci error recovery\n",
			__func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
		    ioc_info(ioc, "%s: host is not operational\n",
			__func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
	    ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		handle, smid, ioc->tm_sas_control_cb_idx));

	/* Build the REMOVE_DEVICE sas io unit control request in place. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
  4075. /**
  4076. * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
  4077. * @ioc: per adapter object
  4078. * @smid: system request message index
  4079. *
  4080. * Context: Executed in interrupt context
  4081. *
  4082. * This will check delayed internal messages list, and process the
  4083. * next request.
  4084. *
  4085. * Return: 1 meaning mf should be freed from _base_interrupt
  4086. * 0 means the mf is freed from this function.
  4087. */
  4088. u8
  4089. mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  4090. {
  4091. struct _sc_list *delayed_sc;
  4092. struct _event_ack_list *delayed_event_ack;
  4093. if (!list_empty(&ioc->delayed_event_ack_list)) {
  4094. delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
  4095. struct _event_ack_list, list);
  4096. _scsih_issue_delayed_event_ack(ioc, smid,
  4097. delayed_event_ack->Event, delayed_event_ack->EventContext);
  4098. list_del(&delayed_event_ack->list);
  4099. kfree(delayed_event_ack);
  4100. return 0;
  4101. }
  4102. if (!list_empty(&ioc->delayed_sc_list)) {
  4103. delayed_sc = list_entry(ioc->delayed_sc_list.next,
  4104. struct _sc_list, list);
  4105. _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
  4106. delayed_sc->handle);
  4107. list_del(&delayed_sc->list);
  4108. kfree(delayed_sc);
  4109. return 0;
  4110. }
  4111. return 1;
  4112. }
  4113. /**
  4114. * _scsih_check_for_pending_tm - check for pending task management
  4115. * @ioc: per adapter object
  4116. * @smid: system request message index
  4117. *
  4118. * This will check delayed target reset list, and feed the
  4119. * next reqeust.
  4120. *
  4121. * Return: 1 meaning mf should be freed from _base_interrupt
  4122. * 0 means the mf is freed from this function.
  4123. */
  4124. static u8
  4125. _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  4126. {
  4127. struct _tr_list *delayed_tr;
  4128. if (!list_empty(&ioc->delayed_tr_volume_list)) {
  4129. delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
  4130. struct _tr_list, list);
  4131. mpt3sas_base_free_smid(ioc, smid);
  4132. _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
  4133. list_del(&delayed_tr->list);
  4134. kfree(delayed_tr);
  4135. return 0;
  4136. }
  4137. if (!list_empty(&ioc->delayed_tr_list)) {
  4138. delayed_tr = list_entry(ioc->delayed_tr_list.next,
  4139. struct _tr_list, list);
  4140. mpt3sas_base_free_smid(ioc, smid);
  4141. _scsih_tm_tr_send(ioc, delayed_tr->handle);
  4142. list_del(&delayed_tr->list);
  4143. kfree(delayed_tr);
  4144. return 0;
  4145. }
  4146. return 1;
  4147. }
  4148. /**
  4149. * _scsih_check_topo_delete_events - sanity check on topo events
  4150. * @ioc: per adapter object
  4151. * @event_data: the event data payload
  4152. *
  4153. * This routine added to better handle cable breaker.
  4154. *
  4155. * This handles the case where driver receives multiple expander
  4156. * add and delete events in a single shot. When there is a delete event
  4157. * the routine will void any pending add events waiting in the event queue.
  4158. */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* Send a target reset for every attached device reported as
	 * not responding.
	 */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		/* A handle below the HBA phy count is treated as devices
		 * attached directly to the controller, not an expander.
		 */
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Drain the blocking_handles bitmap populated above.
		 * find_first_bit() returns MaxDevHandle when no bit is set,
		 * ending the loop via the test_and_clear_bit() condition.
		 * NOTE(review): on that final pass test_and_clear_bit() is
		 * invoked with handle == MaxDevHandle, one past the stated
		 * bitmap size - confirm the allocation rounds up enough to
		 * make this safe.
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
				    ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
  4224. /**
  4225. * _scsih_check_pcie_topo_remove_events - sanity check on topo
  4226. * events
  4227. * @ioc: per adapter object
  4228. * @event_data: the event data payload
  4229. *
  4230. * This handles the case where driver receives multiple switch
  4231. * or device add and delete events in a single shot. When there
  4232. * is a delete event the routine will void any pending add
  4233. * events waiting in the event queue.
  4234. */
  4235. static void
  4236. _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
  4237. Mpi26EventDataPCIeTopologyChangeList_t *event_data)
  4238. {
  4239. struct fw_event_work *fw_event;
  4240. Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
  4241. unsigned long flags;
  4242. int i, reason_code;
  4243. u16 handle, switch_handle;
  4244. for (i = 0; i < event_data->NumEntries; i++) {
  4245. handle =
  4246. le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
  4247. if (!handle)
  4248. continue;
  4249. reason_code = event_data->PortEntry[i].PortStatus;
  4250. if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
  4251. _scsih_tm_tr_send(ioc, handle);
  4252. }
  4253. switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
  4254. if (!switch_handle) {
  4255. _scsih_block_io_to_pcie_children_attached_directly(
  4256. ioc, event_data);
  4257. return;
  4258. }
  4259. /* TODO We are not supporting cascaded PCIe Switch removal yet*/
  4260. if ((event_data->SwitchStatus
  4261. == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
  4262. (event_data->SwitchStatus ==
  4263. MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
  4264. _scsih_block_io_to_pcie_children_attached_directly(
  4265. ioc, event_data);
  4266. if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
  4267. return;
  4268. /* mark ignore flag for pending events */
  4269. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  4270. list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
  4271. if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
  4272. fw_event->ignore)
  4273. continue;
  4274. local_event_data =
  4275. (Mpi26EventDataPCIeTopologyChangeList_t *)
  4276. fw_event->event_data;
  4277. if (local_event_data->SwitchStatus ==
  4278. MPI2_EVENT_SAS_TOPO_ES_ADDED ||
  4279. local_event_data->SwitchStatus ==
  4280. MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
  4281. if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
  4282. switch_handle) {
  4283. dewtprintk(ioc,
  4284. ioc_info(ioc, "setting ignoring flag for switch event\n"));
  4285. fw_event->ignore = 1;
  4286. }
  4287. }
  4288. }
  4289. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  4290. }
  4291. /**
  4292. * _scsih_set_volume_delete_flag - setting volume delete flag
  4293. * @ioc: per adapter object
  4294. * @handle: device handle
  4295. *
  4296. * This returns nothing.
  4297. */
  4298. static void
  4299. _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  4300. {
  4301. struct _raid_device *raid_device;
  4302. struct MPT3SAS_TARGET *sas_target_priv_data;
  4303. unsigned long flags;
  4304. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  4305. raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
  4306. if (raid_device && raid_device->starget &&
  4307. raid_device->starget->hostdata) {
  4308. sas_target_priv_data =
  4309. raid_device->starget->hostdata;
  4310. sas_target_priv_data->deleted = 1;
  4311. dewtprintk(ioc,
  4312. ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
  4313. handle, (u64)raid_device->wwid));
  4314. }
  4315. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  4316. }
  4317. /**
  4318. * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
  4319. * @handle: input handle
  4320. * @a: handle for volume a
  4321. * @b: handle for volume b
  4322. *
  4323. * IR firmware only supports two raid volumes. The purpose of this
  4324. * routine is to set the volume handle in either a or b. When the given
  4325. * input handle is non-zero, or when a and b have not been set before.
  4326. */
  4327. static void
  4328. _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
  4329. {
  4330. if (!handle || handle == *a || handle == *b)
  4331. return;
  4332. if (!*a)
  4333. *a = handle;
  4334. else if (!*b)
  4335. *b = handle;
  4336. }
  4337. /**
  4338. * _scsih_check_ir_config_unhide_events - check for UNHIDE events
  4339. * @ioc: per adapter object
  4340. * @event_data: the event data payload
  4341. * Context: interrupt time.
  4342. *
  4343. * This routine will send target reset to volume, followed by target
  4344. * resets to the PDs. This is called when a PD has been removed, or
  4345. * volume has been deleted or removed. When the target reset is sent
  4346. * to volume, the PD target resets need to be queued to start upon
  4347. * completion of the volume target reset.
  4348. */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	a = 0;	/* handle of first volume needing a target reset */
	b = 0;	/* handle of second volume (IR supports at most two) */

	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		/* Flags is per-event, so a foreign config skips all elements. */
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Reset the volumes first; PD resets for their members are
	 * queued below so they start after the volume reset completes.
	 */
	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/* Parent volume reset is in flight: defer this PD
			 * reset until a smid completes.
			 * NOTE(review): BUG_ON on a GFP_ATOMIC allocation
			 * failure crashes the system; a graceful fallback
			 * may be preferable - confirm intent.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
			    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
  4414. /**
  4415. * _scsih_check_volume_delete_events - set delete flag for volumes
  4416. * @ioc: per adapter object
  4417. * @event_data: the event data payload
  4418. * Context: interrupt time.
  4419. *
  4420. * This will handle the case when the cable connected to entire volume is
  4421. * pulled. We will take care of setting the deleted flag so normal IO will
  4422. * not be sent.
  4423. */
  4424. static void
  4425. _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
  4426. Mpi2EventDataIrVolume_t *event_data)
  4427. {
  4428. u32 state;
  4429. if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
  4430. return;
  4431. state = le32_to_cpu(event_data->NewValue);
  4432. if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
  4433. MPI2_RAID_VOL_STATE_FAILED)
  4434. _scsih_set_volume_delete_flag(ioc,
  4435. le16_to_cpu(event_data->VolDevHandle));
  4436. }
  4437. /**
  4438. * _scsih_temp_threshold_events - display temperature threshold exceeded events
  4439. * @ioc: per adapter object
  4440. * @event_data: the temp threshold event data
  4441. * Context: interrupt time.
  4442. */
  4443. static void
  4444. _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
  4445. Mpi2EventDataTemperature_t *event_data)
  4446. {
  4447. u32 doorbell;
  4448. if (ioc->temp_sensors_count >= event_data->SensorNum) {
  4449. ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
  4450. le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
  4451. le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
  4452. le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
  4453. le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
  4454. event_data->SensorNum);
  4455. ioc_err(ioc, "Current Temp In Celsius: %d\n",
  4456. event_data->CurrentTemperature);
  4457. if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
  4458. doorbell = mpt3sas_base_get_iocstate(ioc, 0);
  4459. if ((doorbell & MPI2_IOC_STATE_MASK) ==
  4460. MPI2_IOC_STATE_FAULT) {
  4461. mpt3sas_print_fault_code(ioc,
  4462. doorbell & MPI2_DOORBELL_DATA_MASK);
  4463. } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
  4464. MPI2_IOC_STATE_COREDUMP) {
  4465. mpt3sas_print_coredump_info(ioc,
  4466. doorbell & MPI2_DOORBELL_DATA_MASK);
  4467. }
  4468. }
  4469. }
  4470. }
  4471. static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
  4472. {
  4473. struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
  4474. if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
  4475. return 0;
  4476. if (pending)
  4477. return test_and_set_bit(0, &priv->ata_command_pending);
  4478. clear_bit(0, &priv->ata_command_pending);
  4479. return 0;
  4480. }
  4481. /**
  4482. * _scsih_flush_running_cmds - completing outstanding commands.
  4483. * @ioc: per adapter object
  4484. *
  4485. * The flushing out of all pending scmd commands following host reset,
  4486. * where all IO is dropped to the floor.
  4487. */
  4488. static void
  4489. _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
  4490. {
  4491. struct scsi_cmnd *scmd;
  4492. struct scsiio_tracker *st;
  4493. u16 smid;
  4494. int count = 0;
  4495. for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
  4496. scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  4497. if (!scmd)
  4498. continue;
  4499. count++;
  4500. _scsih_set_satl_pending(scmd, false);
  4501. st = scsi_cmd_priv(scmd);
  4502. mpt3sas_base_clear_st(ioc, st);
  4503. scsi_dma_unmap(scmd);
  4504. if (ioc->pci_error_recovery || ioc->remove_host)
  4505. scmd->result = DID_NO_CONNECT << 16;
  4506. else
  4507. scmd->result = DID_RESET << 16;
  4508. scsi_done(scmd);
  4509. }
  4510. dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
  4511. }
  4512. /**
  4513. * _scsih_setup_eedp - setup MPI request for EEDP transfer
  4514. * @ioc: per adapter object
  4515. * @scmd: pointer to scsi command object
  4516. * @mpi_request: pointer to the SCSI_IO request message frame
  4517. *
  4518. * Supporting protection 1 and 3.
  4519. */
  4520. static void
  4521. _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
  4522. Mpi25SCSIIORequest_t *mpi_request)
  4523. {
  4524. u16 eedp_flags;
  4525. Mpi25SCSIIORequest_t *mpi_request_3v =
  4526. (Mpi25SCSIIORequest_t *)mpi_request;
  4527. switch (scsi_get_prot_op(scmd)) {
  4528. case SCSI_PROT_READ_STRIP:
  4529. eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
  4530. break;
  4531. case SCSI_PROT_WRITE_INSERT:
  4532. eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
  4533. break;
  4534. default:
  4535. return;
  4536. }
  4537. if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
  4538. eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
  4539. if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
  4540. eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
  4541. if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
  4542. eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
  4543. mpi_request->CDB.EEDP32.PrimaryReferenceTag =
  4544. cpu_to_be32(scsi_prot_ref_tag(scmd));
  4545. }
  4546. mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
  4547. if (ioc->is_gen35_ioc)
  4548. eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
  4549. mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
  4550. }
  4551. /**
  4552. * _scsih_eedp_error_handling - return sense code for EEDP errors
  4553. * @scmd: pointer to scsi command object
  4554. * @ioc_status: ioc status
  4555. */
  4556. static void
  4557. _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
  4558. {
  4559. u8 ascq;
  4560. switch (ioc_status) {
  4561. case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
  4562. ascq = 0x01;
  4563. break;
  4564. case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
  4565. ascq = 0x02;
  4566. break;
  4567. case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
  4568. ascq = 0x03;
  4569. break;
  4570. default:
  4571. ascq = 0x00;
  4572. break;
  4573. }
  4574. scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
  4575. set_host_byte(scmd, DID_ABORT);
  4576. }
/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success. If there's a failure, return either:
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
 */
static int
scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _raid_device *raid_device;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int class;
	Mpi25SCSIIORequest_t *mpi_request;
	struct _pcie_device *pcie_device = NULL;
	u32 mpi_control;
	u16 smid;
	u16 handle;

	if (ioc->logging_level & MPT_DEBUG_SCSI)
		scsi_print_command(scmd);

	/* device was never fully set up - complete with no-connect */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	sas_target_priv_data = sas_device_priv_data->sas_target;

	/* invalid device handle */
	handle = sas_target_priv_data->handle;

	/*
	 * Avoid error handling escalation when device is disconnected:
	 * during host recovery answer TEST UNIT READY for a gone/blocked
	 * device with a unit-attention (asc 0x29 ascq 0x07) instead of
	 * letting it queue and time out.
	 */
	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
		if (scmd->device->host->shost_state == SHOST_RECOVERY &&
		    scmd->cmnd[0] == TEST_UNIT_READY) {
			scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
			scsi_done(scmd);
			return 0;
		}
	}

	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
		/* host recovery or link resets sent via IOCTLs */
		return SCSI_MLQUEUE_HOST_BUSY;
	} else if (sas_target_priv_data->deleted) {
		/* device has been deleted */
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	} else if (sas_target_priv_data->tm_busy ||
		   sas_device_priv_data->block) {
		/* device busy with task management */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/*
	 * Bug work around for firmware SATL handling. The loop
	 * is based on atomic operations and ensures consistency
	 * since we're lockless at this point
	 */
	do {
		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
			return SCSI_MLQUEUE_DEVICE_BUSY;
	} while (_scsih_set_satl_pending(scmd, true));

	/* map the data direction onto the MPI control field */
	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;

	/* set tags */
	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	/* NCQ Prio supported, make sure control indicated high priority */
	if (sas_device_priv_data->ncq_prio_enable) {
		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (class == IOPRIO_CLASS_RT)
			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
	}
	/* Make sure Device is not raid volume.
	 * We do not expose raid functionality to upper layer for warpdrive.
	 */
	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
	    && !scsih_is_nvme(&scmd->device->sdev_gendev))
	    && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;

	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		/* release the SATL gate taken above before bailing out */
		_scsih_set_satl_pending(scmd, false);
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	_scsih_setup_eedp(ioc, scmd, mpi_request);

	if (scmd->cmd_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * NOTE(review): the assignment below is immediately overwritten by
	 * the if/else that follows; kept as-is to stay faithful to the
	 * original control flow.
	 */
	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
	mpi_request->Control = cpu_to_le32(mpi_control);
	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
	mpi_request->SenseBufferLowAddress =
	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
	    mpi_request->LUN);
	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);

	if (mpi_request->DataLength) {
		pcie_device = sas_target_priv_data->pcie_dev;
		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
			/* SG build failed: return the smid and SATL gate */
			mpt3sas_base_free_smid(ioc, smid);
			_scsih_set_satl_pending(scmd, false);
			goto out;
		}
	} else
		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);

	raid_device = sas_target_priv_data->raid_device;
	if (raid_device && raid_device->direct_io_enabled)
		mpt3sas_setup_direct_io(ioc, scmd,
		    raid_device, mpi_request);

	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
			/* fast-path capable target: flag the IO and use the
			 * fast-path doorbell */
			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
			ioc->put_smid_fast_path(ioc, smid, handle);
		} else
			ioc->put_smid_scsi_io(ioc, smid,
			    le16_to_cpu(mpi_request->DevHandle));
	} else
		ioc->put_smid_default(ioc, smid);
	return 0;

 out:
	return SCSI_MLQUEUE_HOST_BUSY;
}
  4733. /**
  4734. * _scsih_normalize_sense - normalize descriptor and fixed format sense data
  4735. * @sense_buffer: sense data returned by target
  4736. * @data: normalized skey/asc/ascq
  4737. */
  4738. static void
  4739. _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
  4740. {
  4741. if ((sense_buffer[0] & 0x7F) >= 0x72) {
  4742. /* descriptor format */
  4743. data->skey = sense_buffer[1] & 0x0F;
  4744. data->asc = sense_buffer[2];
  4745. data->ascq = sense_buffer[3];
  4746. } else {
  4747. /* fixed format */
  4748. data->skey = sense_buffer[2] & 0x0F;
  4749. data->asc = sense_buffer[12];
  4750. data->ascq = sense_buffer[13];
  4751. }
  4752. }
/**
 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index of the failed IO
 *
 * Logs a human-readable breakdown of a failed SCSI IO:
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scratch string shared per-adapter; built up via strcat below */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* loginfo 0x31170000 is suppressed - not worth reporting */
	if (log_info == 0x31170000)
		return;

	/* translate ioc_status to text */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* translate the target's SCSI status to text */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* build a space-separated list of the scsi_state flag bits */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* identify the failing device: volume, NVMe, or SAS */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);
			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);
			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}
  4953. /**
  4954. * _scsih_turn_on_pfa_led - illuminate PFA LED
  4955. * @ioc: per adapter object
  4956. * @handle: device handle
  4957. * Context: process
  4958. */
  4959. static void
  4960. _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  4961. {
  4962. Mpi2SepReply_t mpi_reply;
  4963. Mpi2SepRequest_t mpi_request;
  4964. struct _sas_device *sas_device;
  4965. sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
  4966. if (!sas_device)
  4967. return;
  4968. memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
  4969. mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
  4970. mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
  4971. mpi_request.SlotStatus =
  4972. cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
  4973. mpi_request.DevHandle = cpu_to_le16(handle);
  4974. mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
  4975. if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
  4976. &mpi_request)) != 0) {
  4977. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  4978. __FILE__, __LINE__, __func__);
  4979. goto out;
  4980. }
  4981. sas_device->pfa_led_on = 1;
  4982. if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
  4983. dewtprintk(ioc,
  4984. ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
  4985. le16_to_cpu(mpi_reply.IOCStatus),
  4986. le32_to_cpu(mpi_reply.IOCLogInfo)));
  4987. goto out;
  4988. }
  4989. out:
  4990. sas_device_put(sas_device);
  4991. }
  4992. /**
  4993. * _scsih_turn_off_pfa_led - turn off Fault LED
  4994. * @ioc: per adapter object
  4995. * @sas_device: sas device whose PFA LED has to turned off
  4996. * Context: process
  4997. */
  4998. static void
  4999. _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
  5000. struct _sas_device *sas_device)
  5001. {
  5002. Mpi2SepReply_t mpi_reply;
  5003. Mpi2SepRequest_t mpi_request;
  5004. memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
  5005. mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
  5006. mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
  5007. mpi_request.SlotStatus = 0;
  5008. mpi_request.Slot = cpu_to_le16(sas_device->slot);
  5009. mpi_request.DevHandle = 0;
  5010. mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
  5011. mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
  5012. if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
  5013. &mpi_request)) != 0) {
  5014. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  5015. __FILE__, __LINE__, __func__);
  5016. return;
  5017. }
  5018. if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
  5019. dewtprintk(ioc,
  5020. ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
  5021. le16_to_cpu(mpi_reply.IOCStatus),
  5022. le32_to_cpu(mpi_reply.IOCLogInfo)));
  5023. return;
  5024. }
  5025. }
  5026. /**
  5027. * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
  5028. * @ioc: per adapter object
  5029. * @handle: device handle
  5030. * Context: interrupt.
  5031. */
  5032. static void
  5033. _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  5034. {
  5035. struct fw_event_work *fw_event;
  5036. fw_event = alloc_fw_event_work(0);
  5037. if (!fw_event)
  5038. return;
  5039. fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
  5040. fw_event->device_handle = handle;
  5041. fw_event->ioc = ioc;
  5042. _scsih_fw_event_add(ioc, fw_event);
  5043. fw_event_work_put(fw_event);
  5044. }
/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Looks up the device, optionally fires the PFA-LED event (IBM
 * subsystems only), and inserts a synthetic SAS device status change
 * event (SMART data, asc 0x5D) into the driver's event log.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;
	/* skip raid components and volumes - nothing to report for them */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	    ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* PFA LED is only driven on IBM-branded hardware */
	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: interrupt context, cannot sleep */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	/* asc 0x5D = failure prediction threshold exceeded */
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
 out:
	/* reference taken by __mpt3sas_get_sdev_by_handle() above */
	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}
  5106. /**
  5107. * _scsih_io_done - scsi request callback
  5108. * @ioc: per adapter object
  5109. * @smid: system request message index
  5110. * @msix_index: MSIX table index supplied by the OS
  5111. * @reply: reply message frame(lower 32bit addr)
  5112. *
  5113. * Callback handler when using _scsih_qcmd.
  5114. *
  5115. * Return: 1 meaning mf should be freed from _base_interrupt
  5116. * 0 means the mf is freed from this function.
  5117. */
  5118. static u8
  5119. _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  5120. {
  5121. Mpi25SCSIIORequest_t *mpi_request;
  5122. Mpi2SCSIIOReply_t *mpi_reply;
  5123. struct scsi_cmnd *scmd;
  5124. struct scsiio_tracker *st;
  5125. u16 ioc_status;
  5126. u32 xfer_cnt;
  5127. u8 scsi_state;
  5128. u8 scsi_status;
  5129. u32 log_info;
  5130. struct MPT3SAS_DEVICE *sas_device_priv_data;
  5131. u32 response_code = 0;
  5132. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  5133. scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  5134. if (scmd == NULL)
  5135. return 1;
  5136. _scsih_set_satl_pending(scmd, false);
  5137. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  5138. if (mpi_reply == NULL) {
  5139. scmd->result = DID_OK << 16;
  5140. goto out;
  5141. }
  5142. sas_device_priv_data = scmd->device->hostdata;
  5143. if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
  5144. sas_device_priv_data->sas_target->deleted) {
  5145. scmd->result = DID_NO_CONNECT << 16;
  5146. goto out;
  5147. }
  5148. ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
  5149. /*
  5150. * WARPDRIVE: If direct_io is set then it is directIO,
  5151. * the failed direct I/O should be redirected to volume
  5152. */
  5153. st = scsi_cmd_priv(scmd);
  5154. if (st->direct_io &&
  5155. ((ioc_status & MPI2_IOCSTATUS_MASK)
  5156. != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
  5157. st->direct_io = 0;
  5158. st->scmd = scmd;
  5159. memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
  5160. mpi_request->DevHandle =
  5161. cpu_to_le16(sas_device_priv_data->sas_target->handle);
  5162. ioc->put_smid_scsi_io(ioc, smid,
  5163. sas_device_priv_data->sas_target->handle);
  5164. return 0;
  5165. }
  5166. /* turning off TLR */
  5167. scsi_state = mpi_reply->SCSIState;
  5168. if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
  5169. response_code =
  5170. le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
  5171. if (!sas_device_priv_data->tlr_snoop_check) {
  5172. sas_device_priv_data->tlr_snoop_check++;
  5173. if ((!ioc->is_warpdrive &&
  5174. !scsih_is_raid(&scmd->device->sdev_gendev) &&
  5175. !scsih_is_nvme(&scmd->device->sdev_gendev))
  5176. && sas_is_tlr_enabled(scmd->device) &&
  5177. response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
  5178. sas_disable_tlr(scmd->device);
  5179. sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
  5180. }
  5181. }
  5182. xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
  5183. scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
  5184. if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
  5185. log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
  5186. else
  5187. log_info = 0;
  5188. ioc_status &= MPI2_IOCSTATUS_MASK;
  5189. scsi_status = mpi_reply->SCSIStatus;
  5190. if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
  5191. (scsi_status == MPI2_SCSI_STATUS_BUSY ||
  5192. scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
  5193. scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
  5194. ioc_status = MPI2_IOCSTATUS_SUCCESS;
  5195. }
  5196. if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
  5197. struct sense_info data;
  5198. const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
  5199. smid);
  5200. u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
  5201. le32_to_cpu(mpi_reply->SenseCount));
  5202. memcpy(scmd->sense_buffer, sense_data, sz);
  5203. _scsih_normalize_sense(scmd->sense_buffer, &data);
  5204. /* failure prediction threshold exceeded */
  5205. if (data.asc == 0x5D)
  5206. _scsih_smart_predicted_fault(ioc,
  5207. le16_to_cpu(mpi_reply->DevHandle));
  5208. mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
  5209. if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
  5210. ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
  5211. (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
  5212. (scmd->sense_buffer[2] == HARDWARE_ERROR)))
  5213. _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
  5214. }
  5215. switch (ioc_status) {
  5216. case MPI2_IOCSTATUS_BUSY:
  5217. case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
  5218. scmd->result = SAM_STAT_BUSY;
  5219. break;
  5220. case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
  5221. scmd->result = DID_NO_CONNECT << 16;
  5222. break;
  5223. case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
  5224. if (sas_device_priv_data->block) {
  5225. scmd->result = DID_TRANSPORT_DISRUPTED << 16;
  5226. goto out;
  5227. }
  5228. if (log_info == 0x31110630) {
  5229. if (scmd->retries > 2) {
  5230. scmd->result = DID_NO_CONNECT << 16;
  5231. scsi_device_set_state(scmd->device,
  5232. SDEV_OFFLINE);
  5233. } else {
  5234. scmd->result = DID_SOFT_ERROR << 16;
  5235. scmd->device->expecting_cc_ua = 1;
  5236. }
  5237. break;
  5238. } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
  5239. scmd->result = DID_RESET << 16;
  5240. break;
  5241. } else if ((scmd->device->channel == RAID_CHANNEL) &&
  5242. (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
  5243. MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
  5244. scmd->result = DID_RESET << 16;
  5245. break;
  5246. }
  5247. scmd->result = DID_SOFT_ERROR << 16;
  5248. break;
  5249. case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
  5250. case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
  5251. scmd->result = DID_RESET << 16;
  5252. break;
  5253. case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
  5254. if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
  5255. scmd->result = DID_SOFT_ERROR << 16;
  5256. else
  5257. scmd->result = (DID_OK << 16) | scsi_status;
  5258. break;
  5259. case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
  5260. scmd->result = (DID_OK << 16) | scsi_status;
  5261. if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
  5262. break;
  5263. if (xfer_cnt < scmd->underflow) {
  5264. if (scsi_status == SAM_STAT_BUSY)
  5265. scmd->result = SAM_STAT_BUSY;
  5266. else
  5267. scmd->result = DID_SOFT_ERROR << 16;
  5268. } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
  5269. MPI2_SCSI_STATE_NO_SCSI_STATUS))
  5270. scmd->result = DID_SOFT_ERROR << 16;
  5271. else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
  5272. scmd->result = DID_RESET << 16;
  5273. else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
  5274. mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
  5275. mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
  5276. scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
  5277. 0x20, 0);
  5278. }
  5279. break;
  5280. case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
  5281. scsi_set_resid(scmd, 0);
  5282. fallthrough;
  5283. case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
  5284. case MPI2_IOCSTATUS_SUCCESS:
  5285. scmd->result = (DID_OK << 16) | scsi_status;
  5286. if (response_code ==
  5287. MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
  5288. (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
  5289. MPI2_SCSI_STATE_NO_SCSI_STATUS)))
  5290. scmd->result = DID_SOFT_ERROR << 16;
  5291. else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
  5292. scmd->result = DID_RESET << 16;
  5293. break;
  5294. case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
  5295. case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
  5296. case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
  5297. _scsih_eedp_error_handling(scmd, ioc_status);
  5298. break;
  5299. case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
  5300. case MPI2_IOCSTATUS_INVALID_FUNCTION:
  5301. case MPI2_IOCSTATUS_INVALID_SGL:
  5302. case MPI2_IOCSTATUS_INTERNAL_ERROR:
  5303. case MPI2_IOCSTATUS_INVALID_FIELD:
  5304. case MPI2_IOCSTATUS_INVALID_STATE:
  5305. case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
  5306. case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
  5307. case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
  5308. default:
  5309. scmd->result = DID_SOFT_ERROR << 16;
  5310. break;
  5311. }
  5312. if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
  5313. _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
  5314. out:
  5315. scsi_dma_unmap(scmd);
  5316. mpt3sas_base_free_smid(ioc, smid);
  5317. scsi_done(scmd);
  5318. return 0;
  5319. }
/**
 * _scsih_update_vphys_after_reset - update the Port's
 *			vphys_list after reset
 * @ioc: per adapter object
 *
 * After a host reset the firmware may renumber Port IDs.  Re-read
 * SASIOUnitPage0, find every HBA phy attached to a virtual (vSES)
 * device, and re-associate the matching virtual_phy object with the
 * hba_port that now owns it, moving it between ports when needed.
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.  Entries still dirty at the
	 * end will be reaped later by _scsih_del_dirty_vphy().
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Skip phys that did not negotiate at least a 1.5G link.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is confirmed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}
		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;
				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equal
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
					    &ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					/*
					 * NOTE(review): mpt3sas_get_sdev_by_addr()
					 * may return a referenced object; confirm
					 * whether a sas_device_put() is required
					 * here.
					 */
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirty.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
 out:
	kfree(sas_iounit_pg0);
}
/**
 * _scsih_get_port_table_after_reset - Construct temporary port table
 * @ioc: per adapter object
 * @port_table: address where port table needs to be constructed
 *
 * Builds the caller-allocated @port_table from SASIOUnitPage0: each HBA
 * phy with a usable link is folded into the entry whose Port ID and
 * attached SAS address match (accumulating its bit in phy_mask), or
 * starts a new entry otherwise.
 *
 * return number of HBA port entries available after reset.
 */
static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table)
{
	u16 sz, ioc_status;
	int i, j;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_count = 0, port_id;

	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		/* allocation failed: report zero ports */
		return port_count;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		found = 0;
		/* Skip phys that did not negotiate at least a 1.5G link. */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		attached_handle =
		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(
		    ioc, attached_handle, &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}
		/*
		 * Fold this phy into an existing entry when both the Port ID
		 * and the attached SAS address match.
		 */
		for (j = 0; j < port_count; j++) {
			port_id = sas_iounit_pg0->PhyData[i].Port;
			if (port_table[j].port_id == port_id &&
			    port_table[j].sas_address == attached_sas_addr) {
				port_table[j].phy_mask |= (1 << i);
				found = 1;
				break;
			}
		}
		if (found)
			continue;
		/* Otherwise start a new port entry for this phy. */
		port_id = sas_iounit_pg0->PhyData[i].Port;
		port_table[port_count].port_id = port_id;
		port_table[port_count].phy_mask = (1 << i);
		port_table[port_count].sas_address = attached_sas_addr;
		port_count++;
	}
 out:
	kfree(sas_iounit_pg0);
	return port_count;
}
/*
 * Match quality returned by _scsih_look_and_get_matched_port_entry(),
 * listed strongest match first (after NOT_MATCHED).
 */
enum hba_port_matched_codes {
	NOT_MATCHED = 0,			/* no dirty entry matched */
	MATCHED_WITH_ADDR_AND_PHYMASK,		/* same address, identical phy_mask */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,	/* same address, overlapping phy_mask, same port id */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK,	/* same address, overlapping phy_mask */
	MATCHED_WITH_ADDR,			/* same SAS address only */
};
/**
 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
 *					from HBA port table
 * @ioc: per adapter object
 * @port_entry: hba port entry from temporary port table which needs to be
 *	      searched for matched entry in the HBA port table
 * @matched_port_entry: save matched hba port entry here
 * @count: count of matched entries
 *
 * Walks ioc->port_table_list considering only entries still marked
 * dirty and keeps the best match, in decreasing precedence:
 * address+identical-phymask (terminates the scan), then
 * address+overlapping-phymask+port-id, then address+overlapping-phymask,
 * then address alone.  For an address-only match @count is set to the
 * number of dirty entries sharing that SAS address.
 *
 * return type of matched entry found.
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;

	*matched_port_entry = NULL;
	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		/* Only dirty (not yet re-claimed) entries are candidates. */
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* Exact match: same SAS address and identical phy mask. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* Same address, overlapping phy mask, same Port ID. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/*
		 * Same address with overlapping phy mask only; never
		 * downgrade a stronger match found earlier.
		 */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* Weakest: SAS address alone; count entries sharing it. */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code == MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}
  5645. /**
  5646. * _scsih_del_phy_part_of_anther_port - remove phy if it
  5647. * is a part of anther port
  5648. *@ioc: per adapter object
  5649. *@port_table: port table after reset
  5650. *@index: hba port entry index
  5651. *@port_count: number of ports available after host reset
  5652. *@offset: HBA phy bit offset
  5653. *
  5654. */
  5655. static void
  5656. _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
  5657. struct hba_port *port_table,
  5658. int index, u8 port_count, int offset)
  5659. {
  5660. struct _sas_node *sas_node = &ioc->sas_hba;
  5661. u32 i, found = 0;
  5662. for (i = 0; i < port_count; i++) {
  5663. if (i == index)
  5664. continue;
  5665. if (port_table[i].phy_mask & (1 << offset)) {
  5666. mpt3sas_transport_del_phy_from_an_existing_port(
  5667. ioc, sas_node, &sas_node->phy[offset]);
  5668. found = 1;
  5669. break;
  5670. }
  5671. }
  5672. if (!found)
  5673. port_table[index].phy_mask |= (1 << offset);
  5674. }
  5675. /**
  5676. * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
  5677. * right port
  5678. *@ioc: per adapter object
  5679. *@hba_port_entry: hba port table entry
  5680. *@port_table: temporary port table
  5681. *@index: hba port entry index
  5682. *@port_count: number of ports available after host reset
  5683. *
  5684. */
  5685. static void
  5686. _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
  5687. struct hba_port *hba_port_entry, struct hba_port *port_table,
  5688. int index, int port_count)
  5689. {
  5690. u32 phy_mask, offset = 0;
  5691. struct _sas_node *sas_node = &ioc->sas_hba;
  5692. phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
  5693. for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
  5694. if (phy_mask & (1 << offset)) {
  5695. if (!(port_table[index].phy_mask & (1 << offset))) {
  5696. _scsih_del_phy_part_of_anther_port(
  5697. ioc, port_table, index, port_count,
  5698. offset);
  5699. continue;
  5700. }
  5701. if (sas_node->phy[offset].phy_belongs_to_port)
  5702. mpt3sas_transport_del_phy_from_an_existing_port(
  5703. ioc, sas_node, &sas_node->phy[offset]);
  5704. mpt3sas_transport_add_phy_to_an_existing_port(
  5705. ioc, sas_node, &sas_node->phy[offset],
  5706. hba_port_entry->sas_address,
  5707. hba_port_entry);
  5708. }
  5709. }
  5710. }
  5711. /**
  5712. * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
  5713. * @ioc: per adapter object
  5714. *
  5715. * Returns nothing.
  5716. */
  5717. static void
  5718. _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
  5719. {
  5720. struct hba_port *port, *port_next;
  5721. struct virtual_phy *vphy, *vphy_next;
  5722. list_for_each_entry_safe(port, port_next,
  5723. &ioc->port_table_list, list) {
  5724. if (!port->vphys_mask)
  5725. continue;
  5726. list_for_each_entry_safe(vphy, vphy_next,
  5727. &port->vphys_list, list) {
  5728. if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
  5729. drsprintk(ioc, ioc_info(ioc,
  5730. "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
  5731. vphy, port->port_id,
  5732. vphy->phy_mask));
  5733. port->vphys_mask &= ~vphy->phy_mask;
  5734. list_del(&vphy->list);
  5735. kfree(vphy);
  5736. }
  5737. }
  5738. if (!port->vphys_mask && !port->sas_address)
  5739. port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
  5740. }
  5741. }
  5742. /**
  5743. * _scsih_del_dirty_port_entries - delete dirty port entries from port list
  5744. * after host reset
  5745. *@ioc: per adapter object
  5746. *
  5747. */
  5748. static void
  5749. _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
  5750. {
  5751. struct hba_port *port, *port_next;
  5752. list_for_each_entry_safe(port, port_next,
  5753. &ioc->port_table_list, list) {
  5754. if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
  5755. port->flags & HBA_PORT_FLAG_NEW_PORT)
  5756. continue;
  5757. drsprintk(ioc, ioc_info(ioc,
  5758. "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
  5759. port, port->port_id, port->phy_mask));
  5760. list_del(&port->list);
  5761. kfree(port);
  5762. }
  5763. }
  5764. /**
  5765. * _scsih_sas_port_refresh - Update HBA port table after host reset
  5766. * @ioc: per adapter object
  5767. */
  5768. static void
  5769. _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
  5770. {
  5771. u32 port_count = 0;
  5772. struct hba_port *port_table;
  5773. struct hba_port *port_table_entry;
  5774. struct hba_port *port_entry = NULL;
  5775. int i, j, count = 0, lcount = 0;
  5776. int ret;
  5777. u64 sas_addr;
  5778. u8 num_phys;
  5779. drsprintk(ioc, ioc_info(ioc,
  5780. "updating ports for sas_host(0x%016llx)\n",
  5781. (unsigned long long)ioc->sas_hba.sas_address));
  5782. mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
  5783. if (!num_phys) {
  5784. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  5785. __FILE__, __LINE__, __func__);
  5786. return;
  5787. }
  5788. if (num_phys > ioc->sas_hba.nr_phys_allocated) {
  5789. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  5790. __FILE__, __LINE__, __func__);
  5791. return;
  5792. }
  5793. ioc->sas_hba.num_phys = num_phys;
  5794. port_table = kcalloc(ioc->sas_hba.num_phys,
  5795. sizeof(struct hba_port), GFP_KERNEL);
  5796. if (!port_table)
  5797. return;
  5798. port_count = _scsih_get_port_table_after_reset(ioc, port_table);
  5799. if (!port_count)
  5800. return;
  5801. drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
  5802. for (j = 0; j < port_count; j++)
  5803. drsprintk(ioc, ioc_info(ioc,
  5804. "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
  5805. port_table[j].port_id,
  5806. port_table[j].phy_mask, port_table[j].sas_address));
  5807. list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
  5808. port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
  5809. drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
  5810. port_table_entry = NULL;
  5811. list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
  5812. drsprintk(ioc, ioc_info(ioc,
  5813. "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
  5814. port_table_entry->port_id,
  5815. port_table_entry->phy_mask,
  5816. port_table_entry->sas_address));
  5817. }
  5818. for (j = 0; j < port_count; j++) {
  5819. ret = _scsih_look_and_get_matched_port_entry(ioc,
  5820. &port_table[j], &port_entry, &count);
  5821. if (!port_entry) {
  5822. drsprintk(ioc, ioc_info(ioc,
  5823. "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
  5824. port_table[j].sas_address,
  5825. port_table[j].port_id));
  5826. continue;
  5827. }
  5828. switch (ret) {
  5829. case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
  5830. case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
  5831. _scsih_add_or_del_phys_from_existing_port(ioc,
  5832. port_entry, port_table, j, port_count);
  5833. break;
  5834. case MATCHED_WITH_ADDR:
  5835. sas_addr = port_table[j].sas_address;
  5836. for (i = 0; i < port_count; i++) {
  5837. if (port_table[i].sas_address == sas_addr)
  5838. lcount++;
  5839. }
  5840. if (count > 1 || lcount > 1)
  5841. port_entry = NULL;
  5842. else
  5843. _scsih_add_or_del_phys_from_existing_port(ioc,
  5844. port_entry, port_table, j, port_count);
  5845. }
  5846. if (!port_entry)
  5847. continue;
  5848. if (port_entry->port_id != port_table[j].port_id)
  5849. port_entry->port_id = port_table[j].port_id;
  5850. port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
  5851. port_entry->phy_mask = port_table[j].phy_mask;
  5852. }
  5853. port_table_entry = NULL;
  5854. }
  5855. /**
  5856. * _scsih_alloc_vphy - allocate virtual_phy object
  5857. * @ioc: per adapter object
  5858. * @port_id: Port ID number
  5859. * @phy_num: HBA Phy number
  5860. *
  5861. * Returns allocated virtual_phy object.
  5862. */
  5863. static struct virtual_phy *
  5864. _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
  5865. {
  5866. struct virtual_phy *vphy;
  5867. struct hba_port *port;
  5868. port = mpt3sas_get_port_by_id(ioc, port_id, 0);
  5869. if (!port)
  5870. return NULL;
  5871. vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
  5872. if (!vphy) {
  5873. vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
  5874. if (!vphy)
  5875. return NULL;
  5876. if (!port->vphys_mask)
  5877. INIT_LIST_HEAD(&port->vphys_list);
  5878. /*
  5879. * Enable bit corresponding to HBA phy number on its
  5880. * parent hba_port object's vphys_mask field.
  5881. */
  5882. port->vphys_mask |= (1 << phy_num);
  5883. vphy->phy_mask |= (1 << phy_num);
  5884. list_add_tail(&vphy->list, &port->vphys_list);
  5885. ioc_info(ioc,
  5886. "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
  5887. vphy, port->port_id, phy_num);
  5888. }
  5889. return vphy;
  5890. }
/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device. It's
 * possible that the handles may change from the previous setting, so this
 * code keeps the handles updated if changed.  It also creates hba_port
 * objects for first-seen Port IDs, allocates virtual_phy objects for
 * hot-added vSES phys, registers phys added by a firmware change with
 * the SAS transport layer, and disables phys removed by one.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* Create an hba_port object for a first-seen Port ID. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/*
		 * Add new HBA phys to STL if these new phys got added as part
		 * of HBA Firmware upgrade/downgrade operation.
		 */
		if (!ioc->sas_hba.phy[i].phy) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				continue;
			}
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				continue;
			}
			ioc->sas_hba.phy[i].phy_id = i;
			mpt3sas_transport_add_host_phy(ioc,
			    &ioc->sas_hba.phy[i], phy_pg0,
			    ioc->sas_hba.parent_dev);
			continue;
		}
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* Report at least 1.5G for a connected but slow-linked phy. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
	/*
	 * Clear the phy details if this phy got disabled as part of
	 * HBA Firmware upgrade/downgrade operation.
	 */
	for (i = ioc->sas_hba.num_phys;
	    i < ioc->sas_hba.nr_phys_allocated; i++) {
		if (ioc->sas_hba.phy[i].phy &&
		    ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
		    SAS_LINK_RATE_1_5_GBPS)
			mpt3sas_transport_update_links(ioc,
			    ioc->sas_hba.sas_address, 0, i,
			    MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
	}
 out:
	kfree(sas_iounit_pg0);
}
  6022. /**
  6023. * _scsih_sas_host_add - create sas host object
  6024. * @ioc: per adapter object
  6025. *
  6026. * Creating host side data object, stored in ioc->sas_hba
  6027. */
  6028. static void
  6029. _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
  6030. {
  6031. int i;
  6032. Mpi2ConfigReply_t mpi_reply;
  6033. Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
  6034. Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
  6035. Mpi2SasPhyPage0_t phy_pg0;
  6036. Mpi2SasDevicePage0_t sas_device_pg0;
  6037. Mpi2SasEnclosurePage0_t enclosure_pg0;
  6038. u16 ioc_status;
  6039. u16 sz;
  6040. u8 device_missing_delay;
  6041. u8 num_phys, port_id;
  6042. struct hba_port *port;
  6043. mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
  6044. if (!num_phys) {
  6045. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6046. __FILE__, __LINE__, __func__);
  6047. return;
  6048. }
  6049. ioc->sas_hba.nr_phys_allocated = max_t(u8,
  6050. MPT_MAX_HBA_NUM_PHYS, num_phys);
  6051. ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
  6052. sizeof(struct _sas_phy), GFP_KERNEL);
  6053. if (!ioc->sas_hba.phy) {
  6054. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6055. __FILE__, __LINE__, __func__);
  6056. goto out;
  6057. }
  6058. ioc->sas_hba.num_phys = num_phys;
  6059. /* sas_iounit page 0 */
  6060. sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
  6061. sizeof(Mpi2SasIOUnit0PhyData_t));
  6062. sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
  6063. if (!sas_iounit_pg0) {
  6064. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6065. __FILE__, __LINE__, __func__);
  6066. return;
  6067. }
  6068. if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
  6069. sas_iounit_pg0, sz))) {
  6070. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6071. __FILE__, __LINE__, __func__);
  6072. goto out;
  6073. }
  6074. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  6075. MPI2_IOCSTATUS_MASK;
  6076. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6077. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6078. __FILE__, __LINE__, __func__);
  6079. goto out;
  6080. }
  6081. /* sas_iounit page 1 */
  6082. sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
  6083. sizeof(Mpi2SasIOUnit1PhyData_t));
  6084. sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
  6085. if (!sas_iounit_pg1) {
  6086. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6087. __FILE__, __LINE__, __func__);
  6088. goto out;
  6089. }
  6090. if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
  6091. sas_iounit_pg1, sz))) {
  6092. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6093. __FILE__, __LINE__, __func__);
  6094. goto out;
  6095. }
  6096. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  6097. MPI2_IOCSTATUS_MASK;
  6098. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6099. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6100. __FILE__, __LINE__, __func__);
  6101. goto out;
  6102. }
  6103. ioc->io_missing_delay =
  6104. sas_iounit_pg1->IODeviceMissingDelay;
  6105. device_missing_delay =
  6106. sas_iounit_pg1->ReportDeviceMissingDelay;
  6107. if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
  6108. ioc->device_missing_delay = (device_missing_delay &
  6109. MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
  6110. else
  6111. ioc->device_missing_delay = device_missing_delay &
  6112. MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
  6113. ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
  6114. for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
  6115. if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
  6116. i))) {
  6117. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6118. __FILE__, __LINE__, __func__);
  6119. goto out;
  6120. }
  6121. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  6122. MPI2_IOCSTATUS_MASK;
  6123. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6124. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6125. __FILE__, __LINE__, __func__);
  6126. goto out;
  6127. }
  6128. if (i == 0)
  6129. ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
  6130. PhyData[0].ControllerDevHandle);
  6131. port_id = sas_iounit_pg0->PhyData[i].Port;
  6132. if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
  6133. port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
  6134. if (!port)
  6135. goto out;
  6136. port->port_id = port_id;
  6137. ioc_info(ioc,
  6138. "hba_port entry: %p, port: %d is added to hba_port list\n",
  6139. port, port->port_id);
  6140. list_add_tail(&port->list,
  6141. &ioc->port_table_list);
  6142. }
  6143. /*
  6144. * Check whether current Phy belongs to HBA vSES device or not.
  6145. */
  6146. if ((le32_to_cpu(phy_pg0.PhyInfo) &
  6147. MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
  6148. (phy_pg0.NegotiatedLinkRate >> 4) >=
  6149. MPI2_SAS_NEG_LINK_RATE_1_5) {
  6150. /*
  6151. * Allocate a virtual_phy object for vSES device.
  6152. */
  6153. if (!_scsih_alloc_vphy(ioc, port_id, i))
  6154. goto out;
  6155. ioc->sas_hba.phy[i].hba_vphy = 1;
  6156. }
  6157. ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
  6158. ioc->sas_hba.phy[i].phy_id = i;
  6159. ioc->sas_hba.phy[i].port =
  6160. mpt3sas_get_port_by_id(ioc, port_id, 0);
  6161. mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
  6162. phy_pg0, ioc->sas_hba.parent_dev);
  6163. }
  6164. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  6165. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
  6166. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6167. __FILE__, __LINE__, __func__);
  6168. goto out;
  6169. }
  6170. ioc->sas_hba.enclosure_handle =
  6171. le16_to_cpu(sas_device_pg0.EnclosureHandle);
  6172. ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
  6173. ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
  6174. ioc->sas_hba.handle,
  6175. (u64)ioc->sas_hba.sas_address,
  6176. ioc->sas_hba.num_phys);
  6177. if (ioc->sas_hba.enclosure_handle) {
  6178. if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
  6179. &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
  6180. ioc->sas_hba.enclosure_handle)))
  6181. ioc->sas_hba.enclosure_logical_id =
  6182. le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
  6183. }
  6184. out:
  6185. kfree(sas_iounit_pg1);
  6186. kfree(sas_iounit_pg0);
  6187. }
  6188. /**
  6189. * _scsih_expander_add - creating expander object
  6190. * @ioc: per adapter object
  6191. * @handle: expander handle
  6192. *
  6193. * Creating expander object, stored in ioc->sas_expander_list.
  6194. *
  6195. * Return: 0 for success, else error.
  6196. */
  6197. static int
  6198. _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  6199. {
  6200. struct _sas_node *sas_expander;
  6201. struct _enclosure_node *enclosure_dev;
  6202. Mpi2ConfigReply_t mpi_reply;
  6203. Mpi2ExpanderPage0_t expander_pg0;
  6204. Mpi2ExpanderPage1_t expander_pg1;
  6205. u32 ioc_status;
  6206. u16 parent_handle;
  6207. u64 sas_address, sas_address_parent = 0;
  6208. int i;
  6209. unsigned long flags;
  6210. struct _sas_port *mpt3sas_port = NULL;
  6211. u8 port_id;
  6212. int rc = 0;
  6213. if (!handle)
  6214. return -1;
  6215. if (ioc->shost_recovery || ioc->pci_error_recovery)
  6216. return -1;
  6217. if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
  6218. MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
  6219. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6220. __FILE__, __LINE__, __func__);
  6221. return -1;
  6222. }
  6223. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  6224. MPI2_IOCSTATUS_MASK;
  6225. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6226. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6227. __FILE__, __LINE__, __func__);
  6228. return -1;
  6229. }
  6230. /* handle out of order topology events */
  6231. parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
  6232. if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
  6233. != 0) {
  6234. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6235. __FILE__, __LINE__, __func__);
  6236. return -1;
  6237. }
  6238. port_id = expander_pg0.PhysicalPort;
  6239. if (sas_address_parent != ioc->sas_hba.sas_address) {
  6240. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  6241. sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
  6242. sas_address_parent,
  6243. mpt3sas_get_port_by_id(ioc, port_id, 0));
  6244. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  6245. if (!sas_expander) {
  6246. rc = _scsih_expander_add(ioc, parent_handle);
  6247. if (rc != 0)
  6248. return rc;
  6249. }
  6250. }
  6251. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  6252. sas_address = le64_to_cpu(expander_pg0.SASAddress);
  6253. sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
  6254. sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
  6255. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  6256. if (sas_expander)
  6257. return 0;
  6258. sas_expander = kzalloc(sizeof(struct _sas_node),
  6259. GFP_KERNEL);
  6260. if (!sas_expander) {
  6261. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6262. __FILE__, __LINE__, __func__);
  6263. return -1;
  6264. }
  6265. sas_expander->handle = handle;
  6266. sas_expander->num_phys = expander_pg0.NumPhys;
  6267. sas_expander->sas_address_parent = sas_address_parent;
  6268. sas_expander->sas_address = sas_address;
  6269. sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
  6270. if (!sas_expander->port) {
  6271. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6272. __FILE__, __LINE__, __func__);
  6273. rc = -1;
  6274. goto out_fail;
  6275. }
  6276. ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
  6277. handle, parent_handle,
  6278. (u64)sas_expander->sas_address, sas_expander->num_phys);
  6279. if (!sas_expander->num_phys) {
  6280. rc = -1;
  6281. goto out_fail;
  6282. }
  6283. sas_expander->phy = kcalloc(sas_expander->num_phys,
  6284. sizeof(struct _sas_phy), GFP_KERNEL);
  6285. if (!sas_expander->phy) {
  6286. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6287. __FILE__, __LINE__, __func__);
  6288. rc = -1;
  6289. goto out_fail;
  6290. }
  6291. INIT_LIST_HEAD(&sas_expander->sas_port_list);
  6292. mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
  6293. sas_address_parent, sas_expander->port);
  6294. if (!mpt3sas_port) {
  6295. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6296. __FILE__, __LINE__, __func__);
  6297. rc = -1;
  6298. goto out_fail;
  6299. }
  6300. sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
  6301. sas_expander->rphy = mpt3sas_port->rphy;
  6302. for (i = 0 ; i < sas_expander->num_phys ; i++) {
  6303. if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
  6304. &expander_pg1, i, handle))) {
  6305. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6306. __FILE__, __LINE__, __func__);
  6307. rc = -1;
  6308. goto out_fail;
  6309. }
  6310. sas_expander->phy[i].handle = handle;
  6311. sas_expander->phy[i].phy_id = i;
  6312. sas_expander->phy[i].port =
  6313. mpt3sas_get_port_by_id(ioc, port_id, 0);
  6314. if ((mpt3sas_transport_add_expander_phy(ioc,
  6315. &sas_expander->phy[i], expander_pg1,
  6316. sas_expander->parent_dev))) {
  6317. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6318. __FILE__, __LINE__, __func__);
  6319. rc = -1;
  6320. goto out_fail;
  6321. }
  6322. }
  6323. if (sas_expander->enclosure_handle) {
  6324. enclosure_dev =
  6325. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  6326. sas_expander->enclosure_handle);
  6327. if (enclosure_dev)
  6328. sas_expander->enclosure_logical_id =
  6329. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  6330. }
  6331. _scsih_expander_node_add(ioc, sas_expander);
  6332. return 0;
  6333. out_fail:
  6334. if (mpt3sas_port)
  6335. mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
  6336. sas_address_parent, sas_expander->port);
  6337. kfree(sas_expander);
  6338. return rc;
  6339. }
  6340. /**
  6341. * mpt3sas_expander_remove - removing expander object
  6342. * @ioc: per adapter object
  6343. * @sas_address: expander sas_address
  6344. * @port: hba port entry
  6345. */
  6346. void
  6347. mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
  6348. struct hba_port *port)
  6349. {
  6350. struct _sas_node *sas_expander;
  6351. unsigned long flags;
  6352. if (ioc->shost_recovery)
  6353. return;
  6354. if (!port)
  6355. return;
  6356. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  6357. sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
  6358. sas_address, port);
  6359. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  6360. if (sas_expander)
  6361. _scsih_expander_node_remove(ioc, sas_expander);
  6362. }
  6363. /**
  6364. * _scsih_done - internal SCSI_IO callback handler.
  6365. * @ioc: per adapter object
  6366. * @smid: system request message index
  6367. * @msix_index: MSIX table index supplied by the OS
  6368. * @reply: reply message frame(lower 32bit addr)
  6369. *
  6370. * Callback handler when sending internal generated SCSI_IO.
  6371. * The callback index passed is `ioc->scsih_cb_idx`
  6372. *
  6373. * Return: 1 meaning mf should be freed from _base_interrupt
  6374. * 0 means the mf is freed from this function.
  6375. */
  6376. static u8
  6377. _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  6378. {
  6379. MPI2DefaultReply_t *mpi_reply;
  6380. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  6381. if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
  6382. return 1;
  6383. if (ioc->scsih_cmds.smid != smid)
  6384. return 1;
  6385. ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
  6386. if (mpi_reply) {
  6387. memcpy(ioc->scsih_cmds.reply, mpi_reply,
  6388. mpi_reply->MsgLength*4);
  6389. ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
  6390. }
  6391. ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
  6392. complete(&ioc->scsih_cmds.done);
  6393. return 1;
  6394. }
/* Upper bound on LUNs per target -- NOTE(review): users of this macro are not
 * visible in this chunk; confirm against the scan/report-luns callers. */
#define MPT3_MAX_LUNS (255)
  6396. /**
  6397. * _scsih_check_access_status - check access flags
  6398. * @ioc: per adapter object
  6399. * @sas_address: sas address
  6400. * @handle: sas device handle
  6401. * @access_status: errors returned during discovery of the device
  6402. *
  6403. * Return: 0 for success, else failure
  6404. */
  6405. static u8
  6406. _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
  6407. u16 handle, u8 access_status)
  6408. {
  6409. u8 rc = 1;
  6410. char *desc = NULL;
  6411. switch (access_status) {
  6412. case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
  6413. case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
  6414. rc = 0;
  6415. break;
  6416. case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
  6417. desc = "sata capability failed";
  6418. break;
  6419. case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
  6420. desc = "sata affiliation conflict";
  6421. break;
  6422. case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
  6423. desc = "route not addressable";
  6424. break;
  6425. case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
  6426. desc = "smp error not addressable";
  6427. break;
  6428. case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
  6429. desc = "device blocked";
  6430. break;
  6431. case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
  6432. case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
  6433. case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
  6434. case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
  6435. case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
  6436. case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
  6437. case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
  6438. case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
  6439. case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
  6440. case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
  6441. case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
  6442. case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
  6443. desc = "sata initialization failed";
  6444. break;
  6445. default:
  6446. desc = "unknown";
  6447. break;
  6448. }
  6449. if (!rc)
  6450. return 0;
  6451. ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
  6452. desc, (u64)sas_address, handle);
  6453. return rc;
  6454. }
/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 *
 * Re-reads SAS device page 0 for @handle and, for a responding end
 * device, refreshes the cached handle/enclosure state and unblocks I/O.
 * Silently returns on any config-page or lookup failure.
 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	/* the lock is held across all cached-state updates below;
	 * __mpt3sas_get_sdev_by_addr takes a reference that must be
	 * dropped via sas_device_put on every exit path.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* firmware re-assigned the device handle: propagate the new one
	 * to the target private data and refresh enclosure information
	 */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
			sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
				le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* device looks healthy: drop the lock before unblocking I/O */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}
  6558. /**
  6559. * _scsih_add_device - creating sas device object
  6560. * @ioc: per adapter object
  6561. * @handle: sas device handle
  6562. * @phy_num: phy number end device attached to
  6563. * @is_pd: is this hidden raid component
  6564. *
  6565. * Creating end device object, stored in ioc->sas_device_list.
  6566. *
  6567. * Return: 0 for success, non-zero for failure.
  6568. */
  6569. static int
  6570. _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
  6571. u8 is_pd)
  6572. {
  6573. Mpi2ConfigReply_t mpi_reply;
  6574. Mpi2SasDevicePage0_t sas_device_pg0;
  6575. struct _sas_device *sas_device;
  6576. struct _enclosure_node *enclosure_dev = NULL;
  6577. u32 ioc_status;
  6578. u64 sas_address;
  6579. u32 device_info;
  6580. u8 port_id;
  6581. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  6582. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
  6583. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6584. __FILE__, __LINE__, __func__);
  6585. return -1;
  6586. }
  6587. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  6588. MPI2_IOCSTATUS_MASK;
  6589. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6590. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6591. __FILE__, __LINE__, __func__);
  6592. return -1;
  6593. }
  6594. /* check if this is end device */
  6595. device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
  6596. if (!(_scsih_is_end_device(device_info)))
  6597. return -1;
  6598. set_bit(handle, ioc->pend_os_device_add);
  6599. sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
  6600. /* check if device is present */
  6601. if (!(le16_to_cpu(sas_device_pg0.Flags) &
  6602. MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
  6603. ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
  6604. handle);
  6605. return -1;
  6606. }
  6607. /* check if there were any issues with discovery */
  6608. if (_scsih_check_access_status(ioc, sas_address, handle,
  6609. sas_device_pg0.AccessStatus))
  6610. return -1;
  6611. port_id = sas_device_pg0.PhysicalPort;
  6612. sas_device = mpt3sas_get_sdev_by_addr(ioc,
  6613. sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
  6614. if (sas_device) {
  6615. clear_bit(handle, ioc->pend_os_device_add);
  6616. sas_device_put(sas_device);
  6617. return -1;
  6618. }
  6619. if (sas_device_pg0.EnclosureHandle) {
  6620. enclosure_dev =
  6621. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  6622. le16_to_cpu(sas_device_pg0.EnclosureHandle));
  6623. if (enclosure_dev == NULL)
  6624. ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
  6625. sas_device_pg0.EnclosureHandle);
  6626. }
  6627. sas_device = kzalloc(sizeof(struct _sas_device),
  6628. GFP_KERNEL);
  6629. if (!sas_device) {
  6630. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6631. __FILE__, __LINE__, __func__);
  6632. return 0;
  6633. }
  6634. kref_init(&sas_device->refcount);
  6635. sas_device->handle = handle;
  6636. if (_scsih_get_sas_address(ioc,
  6637. le16_to_cpu(sas_device_pg0.ParentDevHandle),
  6638. &sas_device->sas_address_parent) != 0)
  6639. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6640. __FILE__, __LINE__, __func__);
  6641. sas_device->enclosure_handle =
  6642. le16_to_cpu(sas_device_pg0.EnclosureHandle);
  6643. if (sas_device->enclosure_handle != 0)
  6644. sas_device->slot =
  6645. le16_to_cpu(sas_device_pg0.Slot);
  6646. sas_device->device_info = device_info;
  6647. sas_device->sas_address = sas_address;
  6648. sas_device->phy = sas_device_pg0.PhyNum;
  6649. sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
  6650. MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
  6651. sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
  6652. if (!sas_device->port) {
  6653. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  6654. __FILE__, __LINE__, __func__);
  6655. goto out;
  6656. }
  6657. if (le16_to_cpu(sas_device_pg0.Flags)
  6658. & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
  6659. sas_device->enclosure_level =
  6660. sas_device_pg0.EnclosureLevel;
  6661. memcpy(sas_device->connector_name,
  6662. sas_device_pg0.ConnectorName, 4);
  6663. sas_device->connector_name[4] = '\0';
  6664. } else {
  6665. sas_device->enclosure_level = 0;
  6666. sas_device->connector_name[0] = '\0';
  6667. }
  6668. /* get enclosure_logical_id & chassis_slot*/
  6669. sas_device->is_chassis_slot_valid = 0;
  6670. if (enclosure_dev) {
  6671. sas_device->enclosure_logical_id =
  6672. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  6673. if (le16_to_cpu(enclosure_dev->pg0.Flags) &
  6674. MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
  6675. sas_device->is_chassis_slot_valid = 1;
  6676. sas_device->chassis_slot =
  6677. enclosure_dev->pg0.ChassisSlot;
  6678. }
  6679. }
  6680. /* get device name */
  6681. sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
  6682. sas_device->port_type = sas_device_pg0.MaxPortConnections;
  6683. ioc_info(ioc,
  6684. "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
  6685. handle, sas_device->sas_address, sas_device->port_type);
  6686. if (ioc->wait_for_discovery_to_complete)
  6687. _scsih_sas_device_init_add(ioc, sas_device);
  6688. else
  6689. _scsih_sas_device_add(ioc, sas_device);
  6690. out:
  6691. sas_device_put(sas_device);
  6692. return 0;
  6693. }
/**
 * _scsih_remove_device - removing sas device object
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Tears down the transport port for a departed end device: turns off
 * the IBM PFA LED if it was lit, marks the target deleted, unblocks any
 * pending I/O so it can fail out, and removes the transport port unless
 * drives are hidden (RAID pass-through mode).
 */
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* IBM-branded controllers: extinguish the predictive-failure LED */
	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
	     (sas_device->pfa_led_on)) {
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* flag the target deleted and invalidate its handle so new
	 * commands are rejected before the port goes away
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}
  6738. /**
  6739. * _scsih_sas_topology_change_event_debug - debug for topology event
  6740. * @ioc: per adapter object
  6741. * @event_data: event data payload
  6742. * Context: user.
  6743. */
  6744. static void
  6745. _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  6746. Mpi2EventDataSasTopologyChangeList_t *event_data)
  6747. {
  6748. int i;
  6749. u16 handle;
  6750. u16 reason_code;
  6751. u8 phy_number;
  6752. char *status_str = NULL;
  6753. u8 link_rate, prev_link_rate;
  6754. switch (event_data->ExpStatus) {
  6755. case MPI2_EVENT_SAS_TOPO_ES_ADDED:
  6756. status_str = "add";
  6757. break;
  6758. case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
  6759. status_str = "remove";
  6760. break;
  6761. case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
  6762. case 0:
  6763. status_str = "responding";
  6764. break;
  6765. case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
  6766. status_str = "remove delay";
  6767. break;
  6768. default:
  6769. status_str = "unknown status";
  6770. break;
  6771. }
  6772. ioc_info(ioc, "sas topology change: (%s)\n", status_str);
  6773. pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
  6774. "start_phy(%02d), count(%d)\n",
  6775. le16_to_cpu(event_data->ExpanderDevHandle),
  6776. le16_to_cpu(event_data->EnclosureHandle),
  6777. event_data->StartPhyNum, event_data->NumEntries);
  6778. for (i = 0; i < event_data->NumEntries; i++) {
  6779. handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
  6780. if (!handle)
  6781. continue;
  6782. phy_number = event_data->StartPhyNum + i;
  6783. reason_code = event_data->PHY[i].PhyStatus &
  6784. MPI2_EVENT_SAS_TOPO_RC_MASK;
  6785. switch (reason_code) {
  6786. case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
  6787. status_str = "target add";
  6788. break;
  6789. case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
  6790. status_str = "target remove";
  6791. break;
  6792. case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
  6793. status_str = "delay target remove";
  6794. break;
  6795. case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
  6796. status_str = "link rate change";
  6797. break;
  6798. case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
  6799. status_str = "target responding";
  6800. break;
  6801. default:
  6802. status_str = "unknown";
  6803. break;
  6804. }
  6805. link_rate = event_data->PHY[i].LinkRate >> 4;
  6806. prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
  6807. pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
  6808. " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
  6809. handle, status_str, link_rate, prev_link_rate);
  6810. }
  6811. }
/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Processes an MPI SAS topology change list: refreshes (or creates) the
 * host object, adds an expander when the event reports one, then walks
 * each phy entry to add/remove/re-link the attached device, and finally
 * removes the expander when the event says it stopped responding.
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* bail while the controller or host is being torn down/recovered */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		/* expander-attached: use the expander's address/port */
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		/* direct-attached to the HBA; keep the port looked up above */
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* vacant phys are skipped unless the target went away */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* device still pending OS add: treat as TARG_ADDED */
			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);

			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}
  6924. /**
  6925. * _scsih_sas_device_status_change_event_debug - debug for device event
  6926. * @ioc: ?
  6927. * @event_data: event data payload
  6928. * Context: user.
  6929. */
  6930. static void
  6931. _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  6932. Mpi2EventDataSasDeviceStatusChange_t *event_data)
  6933. {
  6934. char *reason_str = NULL;
  6935. switch (event_data->ReasonCode) {
  6936. case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
  6937. reason_str = "smart data";
  6938. break;
  6939. case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
  6940. reason_str = "unsupported device discovered";
  6941. break;
  6942. case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
  6943. reason_str = "internal device reset";
  6944. break;
  6945. case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
  6946. reason_str = "internal task abort";
  6947. break;
  6948. case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
  6949. reason_str = "internal task abort set";
  6950. break;
  6951. case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
  6952. reason_str = "internal clear task set";
  6953. break;
  6954. case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
  6955. reason_str = "internal query task";
  6956. break;
  6957. case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
  6958. reason_str = "sata init failure";
  6959. break;
  6960. case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
  6961. reason_str = "internal device reset complete";
  6962. break;
  6963. case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
  6964. reason_str = "internal task abort complete";
  6965. break;
  6966. case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
  6967. reason_str = "internal async notification";
  6968. break;
  6969. case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
  6970. reason_str = "expander reduced functionality";
  6971. break;
  6972. case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
  6973. reason_str = "expander reduced functionality complete";
  6974. break;
  6975. default:
  6976. reason_str = "unknown reason";
  6977. break;
  6978. }
  6979. ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
  6980. reason_str, le16_to_cpu(event_data->DevHandle),
  6981. (u64)le64_to_cpu(event_data->SASAddress),
  6982. le16_to_cpu(event_data->TaskTag));
  6983. if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
  6984. pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
  6985. event_data->ASC, event_data->ASCQ);
  6986. pr_cont("\n");
  6987. }
/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @event_data: The fw event
 * Context: user.
 *
 * Sets the target's tm_busy flag when an internal device reset starts
 * and clears it again when the reset-complete event arrives.  All
 * other reason codes are ignored.
 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;
	/* only the internal-reset start/complete events are of interest */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	/* takes a reference on the returned sas_device; dropped at "out" */
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
	if (!sas_device || !sas_device->starget)
		goto out;
	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;
	/* tm_busy = 1 while the internal reset is outstanding,
	 * cleared on the completion event
	 */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
		    target_priv_data->handle);
 out:
	/* drop the lookup reference (if any) before unlocking */
	if (sas_device)
		sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
  7037. /**
  7038. * _scsih_check_pcie_access_status - check access flags
  7039. * @ioc: per adapter object
  7040. * @wwid: wwid
  7041. * @handle: sas device handle
  7042. * @access_status: errors returned during discovery of the device
  7043. *
  7044. * Return: 0 for success, else failure
  7045. */
  7046. static u8
  7047. _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
  7048. u16 handle, u8 access_status)
  7049. {
  7050. u8 rc = 1;
  7051. char *desc = NULL;
  7052. switch (access_status) {
  7053. case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
  7054. case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
  7055. rc = 0;
  7056. break;
  7057. case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
  7058. desc = "PCIe device capability failed";
  7059. break;
  7060. case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
  7061. desc = "PCIe device blocked";
  7062. ioc_info(ioc,
  7063. "Device with Access Status (%s): wwid(0x%016llx), "
  7064. "handle(0x%04x)\n ll only be added to the internal list",
  7065. desc, (u64)wwid, handle);
  7066. rc = 0;
  7067. break;
  7068. case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
  7069. desc = "PCIe device mem space access failed";
  7070. break;
  7071. case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
  7072. desc = "PCIe device unsupported";
  7073. break;
  7074. case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
  7075. desc = "PCIe device MSIx Required";
  7076. break;
  7077. case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
  7078. desc = "PCIe device init fail max";
  7079. break;
  7080. case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
  7081. desc = "PCIe device status unknown";
  7082. break;
  7083. case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
  7084. desc = "nvme ready timeout";
  7085. break;
  7086. case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
  7087. desc = "nvme device configuration unsupported";
  7088. break;
  7089. case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
  7090. desc = "nvme identify failed";
  7091. break;
  7092. case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
  7093. desc = "nvme qconfig failed";
  7094. break;
  7095. case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
  7096. desc = "nvme qcreation failed";
  7097. break;
  7098. case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
  7099. desc = "nvme eventcfg failed";
  7100. break;
  7101. case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
  7102. desc = "nvme get feature stat failed";
  7103. break;
  7104. case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
  7105. desc = "nvme idle timeout";
  7106. break;
  7107. case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
  7108. desc = "nvme failure status";
  7109. break;
  7110. default:
  7111. ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
  7112. access_status, (u64)wwid, handle);
  7113. return rc;
  7114. }
  7115. if (!rc)
  7116. return rc;
  7117. ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
  7118. desc, (u64)wwid, handle);
  7119. return rc;
  7120. }
/**
 * _scsih_pcie_device_remove_from_sml - removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Marks the target deleted, unblocks pending I/O so it can drain,
 * removes the scsi target from the midlayer (unless the device was
 * only on the internal list because it is blocked), and frees the
 * cached serial number string.
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));
	/* flag the target as deleted and release any blocked I/O before
	 * the handle is invalidated
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);
	/* a blocked device was never exposed to the midlayer, so there
	 * is no scsi target to remove in that case
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));
	kfree(pcie_device->serial_number);
}
/**
 * _scsih_pcie_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @handle: attached device handle
 *
 * Re-reads PCIe Device Page 0 for @handle, refreshes the cached
 * handle/enclosure info if the firmware handle changed, and unblocks
 * I/O to the device when it is present with a good access status.
 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	/* fetch current Device Page 0 for this handle */
	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;
	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
		return;
	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* takes a reference on the returned pcie_device */
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}
	/* firmware reassigned the handle: update the cached copies */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		pcie_device->access_status = pcie_device_pg0.AccessStatus;
		starget_printk(KERN_INFO, starget,
			       "handle changed from(0x%04x) to (0x%04x)!!!\n",
			       pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;
		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
				pcie_device_pg0.EnclosureLevel;
			memcpy(&pcie_device->connector_name[0],
			       &pcie_device_pg0.ConnectorName[0], 4);
		} else {
			pcie_device->enclosure_level = 0;
			pcie_device->connector_name[0] = '\0';
		}
	}
	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
			 handle);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}
	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus)) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	pcie_device_put(pcie_device);
	/* device responded: allow queued I/O to flow again */
	_scsih_ublock_io_device(ioc, wwid, NULL);
	return;
}
  7260. /**
  7261. * _scsih_pcie_add_device - creating pcie device object
  7262. * @ioc: per adapter object
  7263. * @handle: pcie device handle
  7264. *
  7265. * Creating end device object, stored in ioc->pcie_device_list.
  7266. *
  7267. * Return: 1 means queue the event later, 0 means complete the event
  7268. */
  7269. static int
  7270. _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  7271. {
  7272. Mpi26PCIeDevicePage0_t pcie_device_pg0;
  7273. Mpi26PCIeDevicePage2_t pcie_device_pg2;
  7274. Mpi2ConfigReply_t mpi_reply;
  7275. struct _pcie_device *pcie_device;
  7276. struct _enclosure_node *enclosure_dev;
  7277. u32 ioc_status;
  7278. u64 wwid;
  7279. if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
  7280. &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
  7281. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  7282. __FILE__, __LINE__, __func__);
  7283. return 0;
  7284. }
  7285. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7286. MPI2_IOCSTATUS_MASK;
  7287. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  7288. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  7289. __FILE__, __LINE__, __func__);
  7290. return 0;
  7291. }
  7292. set_bit(handle, ioc->pend_os_device_add);
  7293. wwid = le64_to_cpu(pcie_device_pg0.WWID);
  7294. /* check if device is present */
  7295. if (!(le32_to_cpu(pcie_device_pg0.Flags) &
  7296. MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
  7297. ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
  7298. handle);
  7299. return 0;
  7300. }
  7301. /* check if there were any issues with discovery */
  7302. if (_scsih_check_pcie_access_status(ioc, wwid, handle,
  7303. pcie_device_pg0.AccessStatus))
  7304. return 0;
  7305. if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
  7306. (pcie_device_pg0.DeviceInfo))))
  7307. return 0;
  7308. pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
  7309. if (pcie_device) {
  7310. clear_bit(handle, ioc->pend_os_device_add);
  7311. pcie_device_put(pcie_device);
  7312. return 0;
  7313. }
  7314. /* PCIe Device Page 2 contains read-only information about a
  7315. * specific NVMe device; therefore, this page is only
  7316. * valid for NVMe devices and skip for pcie devices of type scsi.
  7317. */
  7318. if (!(mpt3sas_scsih_is_pcie_scsi_device(
  7319. le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
  7320. if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
  7321. &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
  7322. handle)) {
  7323. ioc_err(ioc,
  7324. "failure at %s:%d/%s()!\n", __FILE__,
  7325. __LINE__, __func__);
  7326. return 0;
  7327. }
  7328. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7329. MPI2_IOCSTATUS_MASK;
  7330. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  7331. ioc_err(ioc,
  7332. "failure at %s:%d/%s()!\n", __FILE__,
  7333. __LINE__, __func__);
  7334. return 0;
  7335. }
  7336. }
  7337. pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
  7338. if (!pcie_device) {
  7339. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  7340. __FILE__, __LINE__, __func__);
  7341. return 0;
  7342. }
  7343. kref_init(&pcie_device->refcount);
  7344. pcie_device->id = ioc->pcie_target_id++;
  7345. pcie_device->channel = PCIE_CHANNEL;
  7346. pcie_device->handle = handle;
  7347. pcie_device->access_status = pcie_device_pg0.AccessStatus;
  7348. pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
  7349. pcie_device->wwid = wwid;
  7350. pcie_device->port_num = pcie_device_pg0.PortNum;
  7351. pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
  7352. MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
  7353. pcie_device->enclosure_handle =
  7354. le16_to_cpu(pcie_device_pg0.EnclosureHandle);
  7355. if (pcie_device->enclosure_handle != 0)
  7356. pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
  7357. if (le32_to_cpu(pcie_device_pg0.Flags) &
  7358. MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
  7359. pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
  7360. memcpy(&pcie_device->connector_name[0],
  7361. &pcie_device_pg0.ConnectorName[0], 4);
  7362. } else {
  7363. pcie_device->enclosure_level = 0;
  7364. pcie_device->connector_name[0] = '\0';
  7365. }
  7366. /* get enclosure_logical_id */
  7367. if (pcie_device->enclosure_handle) {
  7368. enclosure_dev =
  7369. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  7370. pcie_device->enclosure_handle);
  7371. if (enclosure_dev)
  7372. pcie_device->enclosure_logical_id =
  7373. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  7374. }
  7375. /* TODO -- Add device name once FW supports it */
  7376. if (!(mpt3sas_scsih_is_pcie_scsi_device(
  7377. le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
  7378. pcie_device->nvme_mdts =
  7379. le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
  7380. pcie_device->shutdown_latency =
  7381. le16_to_cpu(pcie_device_pg2.ShutdownLatency);
  7382. /*
  7383. * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
  7384. * if drive's RTD3 Entry Latency is greater then IOC's
  7385. * max_shutdown_latency.
  7386. */
  7387. if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
  7388. ioc->max_shutdown_latency =
  7389. pcie_device->shutdown_latency;
  7390. if (pcie_device_pg2.ControllerResetTO)
  7391. pcie_device->reset_timeout =
  7392. pcie_device_pg2.ControllerResetTO;
  7393. else
  7394. pcie_device->reset_timeout = 30;
  7395. } else
  7396. pcie_device->reset_timeout = 30;
  7397. if (ioc->wait_for_discovery_to_complete)
  7398. _scsih_pcie_device_init_add(ioc, pcie_device);
  7399. else
  7400. _scsih_pcie_device_add(ioc, pcie_device);
  7401. pcie_device_put(pcie_device);
  7402. return 0;
  7403. }
  7404. /**
  7405. * _scsih_pcie_topology_change_event_debug - debug for topology
  7406. * event
  7407. * @ioc: per adapter object
  7408. * @event_data: event data payload
  7409. * Context: user.
  7410. */
  7411. static void
  7412. _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  7413. Mpi26EventDataPCIeTopologyChangeList_t *event_data)
  7414. {
  7415. int i;
  7416. u16 handle;
  7417. u16 reason_code;
  7418. u8 port_number;
  7419. char *status_str = NULL;
  7420. u8 link_rate, prev_link_rate;
  7421. switch (event_data->SwitchStatus) {
  7422. case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
  7423. status_str = "add";
  7424. break;
  7425. case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
  7426. status_str = "remove";
  7427. break;
  7428. case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
  7429. case 0:
  7430. status_str = "responding";
  7431. break;
  7432. case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
  7433. status_str = "remove delay";
  7434. break;
  7435. default:
  7436. status_str = "unknown status";
  7437. break;
  7438. }
  7439. ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
  7440. pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
  7441. "start_port(%02d), count(%d)\n",
  7442. le16_to_cpu(event_data->SwitchDevHandle),
  7443. le16_to_cpu(event_data->EnclosureHandle),
  7444. event_data->StartPortNum, event_data->NumEntries);
  7445. for (i = 0; i < event_data->NumEntries; i++) {
  7446. handle =
  7447. le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
  7448. if (!handle)
  7449. continue;
  7450. port_number = event_data->StartPortNum + i;
  7451. reason_code = event_data->PortEntry[i].PortStatus;
  7452. switch (reason_code) {
  7453. case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
  7454. status_str = "target add";
  7455. break;
  7456. case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
  7457. status_str = "target remove";
  7458. break;
  7459. case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
  7460. status_str = "delay target remove";
  7461. break;
  7462. case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
  7463. status_str = "link rate change";
  7464. break;
  7465. case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
  7466. status_str = "target responding";
  7467. break;
  7468. default:
  7469. status_str = "unknown";
  7470. break;
  7471. }
  7472. link_rate = event_data->PortEntry[i].CurrentPortInfo &
  7473. MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
  7474. prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
  7475. MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
  7476. pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
  7477. " link rate: new(0x%02x), old(0x%02x)\n", port_number,
  7478. handle, status_str, link_rate, prev_link_rate);
  7479. }
  7480. }
/**
 * _scsih_pcie_topology_change_event - handle PCIe topology
 * changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every port entry in the event and adds, removes or
 * re-validates the attached device as indicated by its reason code.
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check per entry: the flags can change while looping */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime. Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			/* device already known: nothing more to do */
			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* rewrite the entry's reason code so the DEV_ADDED
			 * case below (fallthrough) performs the add
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}
  7582. /**
  7583. * _scsih_pcie_device_status_change_event_debug - debug for device event
  7584. * @ioc: ?
  7585. * @event_data: event data payload
  7586. * Context: user.
  7587. */
  7588. static void
  7589. _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  7590. Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
  7591. {
  7592. char *reason_str = NULL;
  7593. switch (event_data->ReasonCode) {
  7594. case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
  7595. reason_str = "smart data";
  7596. break;
  7597. case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
  7598. reason_str = "unsupported device discovered";
  7599. break;
  7600. case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
  7601. reason_str = "internal device reset";
  7602. break;
  7603. case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
  7604. reason_str = "internal task abort";
  7605. break;
  7606. case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
  7607. reason_str = "internal task abort set";
  7608. break;
  7609. case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
  7610. reason_str = "internal clear task set";
  7611. break;
  7612. case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
  7613. reason_str = "internal query task";
  7614. break;
  7615. case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
  7616. reason_str = "device init failure";
  7617. break;
  7618. case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
  7619. reason_str = "internal device reset complete";
  7620. break;
  7621. case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
  7622. reason_str = "internal task abort complete";
  7623. break;
  7624. case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
  7625. reason_str = "internal async notification";
  7626. break;
  7627. case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
  7628. reason_str = "pcie hot reset failed";
  7629. break;
  7630. default:
  7631. reason_str = "unknown reason";
  7632. break;
  7633. }
  7634. ioc_info(ioc, "PCIE device status change: (%s)\n"
  7635. "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
  7636. reason_str, le16_to_cpu(event_data->DevHandle),
  7637. (u64)le64_to_cpu(event_data->WWID),
  7638. le16_to_cpu(event_data->TaskTag));
  7639. if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
  7640. pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
  7641. event_data->ASC, event_data->ASCQ);
  7642. pr_cont("\n");
  7643. }
/**
 * _scsih_pcie_device_status_change_event - handle device status
 * change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Sets the target's tm_busy flag when an internal device reset starts
 * and clears it again when the reset-complete event arrives.  All
 * other reason codes are ignored (after optional debug logging).
 */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
			event_data);
	/* only the internal-reset start/complete events are of interest */
	if (event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	/* takes a reference on the returned pcie_device; dropped at "out" */
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy = 1 while the internal reset is outstanding,
	 * cleared on the completion event
	 */
	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
 out:
	/* drop the lookup reference (if any) before unlocking */
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
  7687. /**
  7688. * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
  7689. * event
  7690. * @ioc: per adapter object
  7691. * @event_data: event data payload
  7692. * Context: user.
  7693. */
  7694. static void
  7695. _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  7696. Mpi2EventDataSasEnclDevStatusChange_t *event_data)
  7697. {
  7698. char *reason_str = NULL;
  7699. switch (event_data->ReasonCode) {
  7700. case MPI2_EVENT_SAS_ENCL_RC_ADDED:
  7701. reason_str = "enclosure add";
  7702. break;
  7703. case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
  7704. reason_str = "enclosure remove";
  7705. break;
  7706. default:
  7707. reason_str = "unknown reason";
  7708. break;
  7709. }
  7710. ioc_info(ioc, "enclosure status change: (%s)\n"
  7711. "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
  7712. reason_str,
  7713. le16_to_cpu(event_data->EnclosureHandle),
  7714. (u64)le64_to_cpu(event_data->EnclosureLogicalID),
  7715. le16_to_cpu(event_data->StartSlot));
  7716. }
/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * On an "added" event, allocates an _enclosure_node, reads its
 * Enclosure Page 0 and appends it to ioc->enclosure_list; on a
 * "not responding" event, unlinks and frees the matching node.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
			(Mpi2EventDataSasEnclDevStatusChange_t *)
			fw_event->event_data);
	if (ioc->shost_recovery)
		return;

	/* look up any node already cached for this handle */
	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
							       enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		/* only allocate/read pg0 if the enclosure isn't known yet */
		if (!enclosure_dev) {
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
					GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
				enclosure_handle);

			/* free the node again if the page read failed */
			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK)) {
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
				      &ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}
  7777. /**
  7778. * _scsih_sas_broadcast_primitive_event - handle broadcast events
  7779. * @ioc: per adapter object
  7780. * @fw_event: The fw_event_work object
  7781. * Context: user.
  7782. */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* Serialize with every other user of the shared tm_cmds frame. */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	/* Quiesce I/O while outstanding commands are queried/aborted. */
	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	/* Walk every possible outstanding SCSI IO by smid. */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/*
		 * Task management sleeps, so the lookup lock is dropped
		 * around each TM request and retaken afterwards.
		 */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
			    "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
			    ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		/* Bound the per-command abort retries. */
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		/* st->cb_idx != 0xFF means the IO has still not completed. */
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* Another broadcast AEN arrived while processing: run once more. */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:
	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
  7924. /**
  7925. * _scsih_sas_discovery_event - handle discovery events
  7926. * @ioc: per adapter object
  7927. * @fw_event: The fw_event_work object
  7928. * Context: user.
  7929. */
  7930. static void
  7931. _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
  7932. struct fw_event_work *fw_event)
  7933. {
  7934. Mpi2EventDataSasDiscovery_t *event_data =
  7935. (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
  7936. if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
  7937. ioc_info(ioc, "discovery event: (%s)",
  7938. event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
  7939. "start" : "stop");
  7940. if (event_data->DiscoveryStatus)
  7941. pr_cont("discovery_status(0x%08x)",
  7942. le32_to_cpu(event_data->DiscoveryStatus));
  7943. pr_cont("\n");
  7944. }
  7945. if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
  7946. !ioc->sas_hba.num_phys) {
  7947. if (disable_discovery > 0 && ioc->shost_recovery) {
  7948. /* Wait for the reset to complete */
  7949. while (ioc->shost_recovery)
  7950. ssleep(1);
  7951. }
  7952. _scsih_sas_host_add(ioc);
  7953. }
  7954. }
  7955. /**
  7956. * _scsih_sas_device_discovery_error_event - display SAS device discovery error
  7957. * events
  7958. * @ioc: per adapter object
  7959. * @fw_event: The fw_event_work object
  7960. * Context: user.
  7961. */
  7962. static void
  7963. _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
  7964. struct fw_event_work *fw_event)
  7965. {
  7966. Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
  7967. (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
  7968. switch (event_data->ReasonCode) {
  7969. case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
  7970. ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
  7971. le16_to_cpu(event_data->DevHandle),
  7972. (u64)le64_to_cpu(event_data->SASAddress),
  7973. event_data->PhysicalPort);
  7974. break;
  7975. case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
  7976. ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
  7977. le16_to_cpu(event_data->DevHandle),
  7978. (u64)le64_to_cpu(event_data->SASAddress),
  7979. event_data->PhysicalPort);
  7980. break;
  7981. default:
  7982. break;
  7983. }
  7984. }
  7985. /**
  7986. * _scsih_pcie_enumeration_event - handle enumeration events
  7987. * @ioc: per adapter object
  7988. * @fw_event: The fw_event_work object
  7989. * Context: user.
  7990. */
  7991. static void
  7992. _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
  7993. struct fw_event_work *fw_event)
  7994. {
  7995. Mpi26EventDataPCIeEnumeration_t *event_data =
  7996. (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
  7997. if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
  7998. return;
  7999. ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
  8000. (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
  8001. "started" : "completed",
  8002. event_data->Flags);
  8003. if (event_data->EnumerationStatus)
  8004. pr_cont("enumeration_status(0x%08x)",
  8005. le32_to_cpu(event_data->EnumerationStatus));
  8006. pr_cont("\n");
  8007. }
  8008. /**
  8009. * _scsih_ir_fastpath - turn on fastpath for IR physdisk
  8010. * @ioc: per adapter object
  8011. * @handle: device handle for physical disk
  8012. * @phys_disk_num: physical disk number
  8013. *
  8014. * Return: 0 for success, else failure.
  8015. */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* Fast path is not applicable on MPI 2.0 (SAS2) controllers. */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* scsih_cmds is a single shared frame; serialize and claim it. */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	/* Build the RAID_ACTION request to hide/fast-path the phys disk. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): mpt3sas_check_cmd_timeout is presumably a
		 * macro that may set issue_reset on timeout — confirm, since
		 * issue_reset is consulted below.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	/* Release the shared frame before any (optional) hard reset. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
  8085. /**
  8086. * _scsih_reprobe_lun - reprobing lun
  8087. * @sdev: scsi device struct
  8088. * @no_uld_attach: sdev->no_uld_attach flag setting
  8089. *
  8090. **/
  8091. static void
  8092. _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
  8093. {
  8094. sdev->no_uld_attach = no_uld_attach ? 1 : 0;
  8095. sdev_printk(KERN_INFO, sdev, "%s raid component\n",
  8096. sdev->no_uld_attach ? "hiding" : "exposing");
  8097. WARN_ON(scsi_device_reprobe(sdev));
  8098. }
  8099. /**
  8100. * _scsih_sas_volume_add - add new volume
  8101. * @ioc: per adapter object
  8102. * @element: IR config element data
  8103. * Context: user.
  8104. */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	/* The volume WWID is the stable identity; bail if unavailable. */
	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Already tracked: nothing to do. */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		/* Register with the SCSI midlayer; roll back on failure. */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/* During initial discovery only evaluate boot-device policy. */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}
  8147. /**
  8148. * _scsih_sas_volume_delete - delete volume
  8149. * @ioc: per adapter object
  8150. * @handle: volume device handle
  8151. * Context: user.
  8152. */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	/*
	 * Mark the target deleted and unlink the raid_device under the
	 * lock; the (sleeping) scsi_remove_target runs after it is dropped.
	 */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	if (starget)
		scsi_remove_target(&starget->dev);
}
  8177. /**
  8178. * _scsih_sas_pd_expose - expose pd component to /dev/sdX
  8179. * @ioc: per adapter object
  8180. * @element: IR config element data
  8181. * Context: user.
  8182. */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	/*
	 * Clear the raid-component bookkeeping under the lock; the
	 * reprobe (which may sleep) happens after it is released.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	/* Drop the reference taken by __mpt3sas_get_sdev_by_handle(). */
	sas_device_put(sas_device);
}
  8213. /**
  8214. * _scsih_sas_pd_hide - hide pd component from /dev/sdX
  8215. * @ioc: per adapter object
  8216. * @element: IR config element data
  8217. * Context: user.
  8218. */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* Resolve the owning volume before taking the device lock. */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	/* Mark the device as a hidden raid component under the lock. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	/* Drop the reference taken by __mpt3sas_get_sdev_by_handle(). */
	sas_device_put(sas_device);
}
  8256. /**
  8257. * _scsih_sas_pd_delete - delete pd component
  8258. * @ioc: per adapter object
  8259. * @element: IR config element data
  8260. * Context: user.
  8261. */
  8262. static void
  8263. _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
  8264. Mpi2EventIrConfigElement_t *element)
  8265. {
  8266. u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
  8267. _scsih_device_remove_by_handle(ioc, handle);
  8268. }
  8269. /**
 * _scsih_sas_pd_add - add pd component
  8271. * @ioc: per adapter object
  8272. * @element: IR config element data
  8273. * Context: user.
  8274. */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	/* Track this handle as a raid physical disk. */
	set_bit(handle, ioc->pd_handles);

	/* Device already known: just turn fast path on and return. */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	/* New device: read its SAS device page 0 from the IOC. */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Refresh the transport link to the parent before adding. */
	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	_scsih_add_device(ioc, handle, 0, 1);
}
  8315. /**
  8316. * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
  8317. * @ioc: per adapter object
  8318. * @event_data: event data payload
  8319. * Context: user.
  8320. */
  8321. static void
  8322. _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  8323. Mpi2EventDataIrConfigChangeList_t *event_data)
  8324. {
  8325. Mpi2EventIrConfigElement_t *element;
  8326. u8 element_type;
  8327. int i;
  8328. char *reason_str = NULL, *element_str = NULL;
  8329. element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
  8330. ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
  8331. le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
  8332. "foreign" : "native",
  8333. event_data->NumElements);
  8334. for (i = 0; i < event_data->NumElements; i++, element++) {
  8335. switch (element->ReasonCode) {
  8336. case MPI2_EVENT_IR_CHANGE_RC_ADDED:
  8337. reason_str = "add";
  8338. break;
  8339. case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
  8340. reason_str = "remove";
  8341. break;
  8342. case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
  8343. reason_str = "no change";
  8344. break;
  8345. case MPI2_EVENT_IR_CHANGE_RC_HIDE:
  8346. reason_str = "hide";
  8347. break;
  8348. case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
  8349. reason_str = "unhide";
  8350. break;
  8351. case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
  8352. reason_str = "volume_created";
  8353. break;
  8354. case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
  8355. reason_str = "volume_deleted";
  8356. break;
  8357. case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
  8358. reason_str = "pd_created";
  8359. break;
  8360. case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
  8361. reason_str = "pd_deleted";
  8362. break;
  8363. default:
  8364. reason_str = "unknown reason";
  8365. break;
  8366. }
  8367. element_type = le16_to_cpu(element->ElementFlags) &
  8368. MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
  8369. switch (element_type) {
  8370. case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
  8371. element_str = "volume";
  8372. break;
  8373. case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
  8374. element_str = "phys disk";
  8375. break;
  8376. case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
  8377. element_str = "hot spare";
  8378. break;
  8379. default:
  8380. element_str = "unknown element";
  8381. break;
  8382. }
  8383. pr_info("\t(%s:%s), vol handle(0x%04x), " \
  8384. "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
  8385. reason_str, le16_to_cpu(element->VolDevHandle),
  8386. le16_to_cpu(element->PhysDiskDevHandle),
  8387. element->PhysDiskNum);
  8388. }
  8389. }
  8390. /**
  8391. * _scsih_sas_ir_config_change_event - handle ir configuration change events
  8392. * @ioc: per adapter object
  8393. * @fw_event: The fw_event_work object
  8394. * Context: user.
  8395. */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	    (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	/* Foreign (imported) configs are not acted on for volume changes. */
	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];

	/*
	 * During host reset (on MPI 2.5/2.6 parts) only the fast-path
	 * setting for hidden phys disks is refreshed; no topology changes.
	 */
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
				    le16_to_cpu(element->PhysDiskDevHandle),
				    element->PhysDiskNum);
		}
		return;
	}

	/* Dispatch each element; warpdrive keeps pd visibility unchanged. */
	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}
  8454. /**
  8455. * _scsih_sas_ir_volume_event - IR volume event
  8456. * @ioc: per adapter object
  8457. * @fw_event: The fw_event_work object
  8458. * Context: user.
  8459. */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	if (ioc->shost_recovery)
		return;

	/* Only volume state transitions are handled here. */
	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		/* Volume is gone or unusable: remove it. */
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		/* Usable volume: add it unless it is already tracked. */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		/* Register with the midlayer; roll back on failure. */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}
  8524. /**
  8525. * _scsih_sas_ir_physical_disk_event - PD event
  8526. * @ioc: per adapter object
  8527. * @fw_event: The fw_event_work object
  8528. * Context: user.
  8529. */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	if (ioc->shost_recovery)
		return;

	/* Only phys-disk state transitions are handled here. */
	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		/* Track the handle as a raid pd (warpdrive keeps it hidden). */
		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* Already known device: nothing more to do. */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		/* New device: fetch its SAS device page 0 from the IOC. */
		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* Refresh the transport link to the parent before adding. */
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
  8597. /**
  8598. * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
  8599. * @ioc: per adapter object
  8600. * @event_data: event data payload
  8601. * Context: user.
  8602. */
  8603. static void
  8604. _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
  8605. Mpi2EventDataIrOperationStatus_t *event_data)
  8606. {
  8607. char *reason_str = NULL;
  8608. switch (event_data->RAIDOperation) {
  8609. case MPI2_EVENT_IR_RAIDOP_RESYNC:
  8610. reason_str = "resync";
  8611. break;
  8612. case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
  8613. reason_str = "online capacity expansion";
  8614. break;
  8615. case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
  8616. reason_str = "consistency check";
  8617. break;
  8618. case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
  8619. reason_str = "background init";
  8620. break;
  8621. case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
  8622. reason_str = "make data consistent";
  8623. break;
  8624. }
  8625. if (!reason_str)
  8626. return;
  8627. ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
  8628. reason_str,
  8629. le16_to_cpu(event_data->VolDevHandle),
  8630. event_data->PercentComplete);
  8631. }
  8632. /**
  8633. * _scsih_sas_ir_operation_status_event - handle RAID operation events
  8634. * @ioc: per adapter object
  8635. * @fw_event: The fw_event_work object
  8636. * Context: user.
  8637. */
  8638. static void
  8639. _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
  8640. struct fw_event_work *fw_event)
  8641. {
  8642. Mpi2EventDataIrOperationStatus_t *event_data =
  8643. (Mpi2EventDataIrOperationStatus_t *)
  8644. fw_event->event_data;
  8645. static struct _raid_device *raid_device;
  8646. unsigned long flags;
  8647. u16 handle;
  8648. if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
  8649. (!ioc->hide_ir_msg))
  8650. _scsih_sas_ir_operation_status_event_debug(ioc,
  8651. event_data);
  8652. /* code added for raid transport support */
  8653. if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
  8654. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  8655. handle = le16_to_cpu(event_data->VolDevHandle);
  8656. raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
  8657. if (raid_device)
  8658. raid_device->percent_complete =
  8659. event_data->PercentComplete;
  8660. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  8661. }
  8662. }
  8663. /**
  8664. * _scsih_prep_device_scan - initialize parameters prior to device scan
  8665. * @ioc: per adapter object
  8666. *
  8667. * Set the deleted flag prior to device scan. If the device is found during
  8668. * the scan, then we clear the deleted flag.
  8669. */
  8670. static void
  8671. _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
  8672. {
  8673. struct MPT3SAS_DEVICE *sas_device_priv_data;
  8674. struct scsi_device *sdev;
  8675. shost_for_each_device(sdev, ioc->shost) {
  8676. sas_device_priv_data = sdev->hostdata;
  8677. if (sas_device_priv_data && sas_device_priv_data->sas_target)
  8678. sas_device_priv_data->sas_target->deleted = 1;
  8679. }
  8680. }
  8681. /**
  8682. * _scsih_update_device_qdepth - Update QD during Reset.
  8683. * @ioc: per adapter object
  8684. *
  8685. */
  8686. static void
  8687. _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
  8688. {
  8689. struct MPT3SAS_DEVICE *sas_device_priv_data;
  8690. struct MPT3SAS_TARGET *sas_target_priv_data;
  8691. struct _sas_device *sas_device;
  8692. struct scsi_device *sdev;
  8693. u16 qdepth;
  8694. ioc_info(ioc, "Update devices with firmware reported queue depth\n");
  8695. shost_for_each_device(sdev, ioc->shost) {
  8696. sas_device_priv_data = sdev->hostdata;
  8697. if (sas_device_priv_data && sas_device_priv_data->sas_target) {
  8698. sas_target_priv_data = sas_device_priv_data->sas_target;
  8699. sas_device = sas_device_priv_data->sas_target->sas_dev;
  8700. if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
  8701. qdepth = ioc->max_nvme_qd;
  8702. else if (sas_device &&
  8703. sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
  8704. qdepth = (sas_device->port_type > 1) ?
  8705. ioc->max_wideport_qd : ioc->max_narrowport_qd;
  8706. else if (sas_device &&
  8707. sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
  8708. qdepth = ioc->max_sata_qd;
  8709. else
  8710. continue;
  8711. mpt3sas_scsih_change_queue_depth(sdev, qdepth);
  8712. }
  8713. }
  8714. }
  8715. /**
  8716. * _scsih_mark_responding_sas_device - mark a sas_devices as responding
  8717. * @ioc: per adapter object
  8718. * @sas_device_pg0: SAS Device page 0
  8719. *
  8720. * After host reset, find out whether devices are still responding.
  8721. * Used in _scsih_remove_unresponsive_sas_devices.
  8722. */
  8723. static void
  8724. _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
  8725. Mpi2SasDevicePage0_t *sas_device_pg0)
  8726. {
  8727. struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
  8728. struct scsi_target *starget;
  8729. struct _sas_device *sas_device = NULL;
  8730. struct _enclosure_node *enclosure_dev = NULL;
  8731. unsigned long flags;
  8732. struct hba_port *port = mpt3sas_get_port_by_id(
  8733. ioc, sas_device_pg0->PhysicalPort, 0);
  8734. if (sas_device_pg0->EnclosureHandle) {
  8735. enclosure_dev =
  8736. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  8737. le16_to_cpu(sas_device_pg0->EnclosureHandle));
  8738. if (enclosure_dev == NULL)
  8739. ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
  8740. sas_device_pg0->EnclosureHandle);
  8741. }
  8742. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  8743. list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
  8744. if (sas_device->sas_address != le64_to_cpu(
  8745. sas_device_pg0->SASAddress))
  8746. continue;
  8747. if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
  8748. continue;
  8749. if (sas_device->port != port)
  8750. continue;
  8751. sas_device->responding = 1;
  8752. starget = sas_device->starget;
  8753. if (starget && starget->hostdata) {
  8754. sas_target_priv_data = starget->hostdata;
  8755. sas_target_priv_data->tm_busy = 0;
  8756. sas_target_priv_data->deleted = 0;
  8757. } else
  8758. sas_target_priv_data = NULL;
  8759. if (starget) {
  8760. starget_printk(KERN_INFO, starget,
  8761. "handle(0x%04x), sas_addr(0x%016llx)\n",
  8762. le16_to_cpu(sas_device_pg0->DevHandle),
  8763. (unsigned long long)
  8764. sas_device->sas_address);
  8765. if (sas_device->enclosure_handle != 0)
  8766. starget_printk(KERN_INFO, starget,
  8767. "enclosure logical id(0x%016llx), slot(%d)\n",
  8768. (unsigned long long)
  8769. sas_device->enclosure_logical_id,
  8770. sas_device->slot);
  8771. }
  8772. if (le16_to_cpu(sas_device_pg0->Flags) &
  8773. MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
  8774. sas_device->enclosure_level =
  8775. sas_device_pg0->EnclosureLevel;
  8776. memcpy(&sas_device->connector_name[0],
  8777. &sas_device_pg0->ConnectorName[0], 4);
  8778. } else {
  8779. sas_device->enclosure_level = 0;
  8780. sas_device->connector_name[0] = '\0';
  8781. }
  8782. sas_device->enclosure_handle =
  8783. le16_to_cpu(sas_device_pg0->EnclosureHandle);
  8784. sas_device->is_chassis_slot_valid = 0;
  8785. if (enclosure_dev) {
  8786. sas_device->enclosure_logical_id = le64_to_cpu(
  8787. enclosure_dev->pg0.EnclosureLogicalID);
  8788. if (le16_to_cpu(enclosure_dev->pg0.Flags) &
  8789. MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
  8790. sas_device->is_chassis_slot_valid = 1;
  8791. sas_device->chassis_slot =
  8792. enclosure_dev->pg0.ChassisSlot;
  8793. }
  8794. }
  8795. if (sas_device->handle == le16_to_cpu(
  8796. sas_device_pg0->DevHandle))
  8797. goto out;
  8798. pr_info("\thandle changed from(0x%04x)!!!\n",
  8799. sas_device->handle);
  8800. sas_device->handle = le16_to_cpu(
  8801. sas_device_pg0->DevHandle);
  8802. if (sas_target_priv_data)
  8803. sas_target_priv_data->handle =
  8804. le16_to_cpu(sas_device_pg0->DevHandle);
  8805. goto out;
  8806. }
  8807. out:
  8808. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  8809. }
  8810. /**
  8811. * _scsih_create_enclosure_list_after_reset - Free Existing list,
  8812. * And create enclosure list by scanning all Enclosure Page(0)s
  8813. * @ioc: per adapter object
  8814. */
  8815. static void
  8816. _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
  8817. {
  8818. struct _enclosure_node *enclosure_dev;
  8819. Mpi2ConfigReply_t mpi_reply;
  8820. u16 enclosure_handle;
  8821. int rc;
  8822. /* Free existing enclosure list */
  8823. mpt3sas_free_enclosure_list(ioc);
  8824. /* Re constructing enclosure list after reset*/
  8825. enclosure_handle = 0xFFFF;
  8826. do {
  8827. enclosure_dev =
  8828. kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
  8829. if (!enclosure_dev) {
  8830. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  8831. __FILE__, __LINE__, __func__);
  8832. return;
  8833. }
  8834. rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
  8835. &enclosure_dev->pg0,
  8836. MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
  8837. enclosure_handle);
  8838. if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
  8839. MPI2_IOCSTATUS_MASK)) {
  8840. kfree(enclosure_dev);
  8841. return;
  8842. }
  8843. list_add_tail(&enclosure_dev->list,
  8844. &ioc->enclosure_list);
  8845. enclosure_handle =
  8846. le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
  8847. } while (1);
  8848. }
  8849. /**
  8850. * _scsih_search_responding_sas_devices -
  8851. * @ioc: per adapter object
  8852. *
  8853. * After host reset, find out whether devices are still responding.
  8854. * If not remove.
  8855. */
  8856. static void
  8857. _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
  8858. {
  8859. Mpi2SasDevicePage0_t sas_device_pg0;
  8860. Mpi2ConfigReply_t mpi_reply;
  8861. u16 ioc_status;
  8862. u16 handle;
  8863. u32 device_info;
  8864. ioc_info(ioc, "search for end-devices: start\n");
  8865. if (list_empty(&ioc->sas_device_list))
  8866. goto out;
  8867. handle = 0xFFFF;
  8868. while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  8869. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  8870. handle))) {
  8871. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8872. MPI2_IOCSTATUS_MASK;
  8873. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  8874. break;
  8875. handle = le16_to_cpu(sas_device_pg0.DevHandle);
  8876. device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
  8877. if (!(_scsih_is_end_device(device_info)))
  8878. continue;
  8879. _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
  8880. }
  8881. out:
  8882. ioc_info(ioc, "search for end-devices: complete\n");
  8883. }
  8884. /**
  8885. * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
  8886. * @ioc: per adapter object
  8887. * @pcie_device_pg0: PCIe Device page 0
  8888. *
  8889. * After host reset, find out whether devices are still responding.
  8890. * Used in _scsih_remove_unresponding_devices.
  8891. */
  8892. static void
  8893. _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
  8894. Mpi26PCIeDevicePage0_t *pcie_device_pg0)
  8895. {
  8896. struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
  8897. struct scsi_target *starget;
  8898. struct _pcie_device *pcie_device;
  8899. unsigned long flags;
  8900. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  8901. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
  8902. if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
  8903. && (pcie_device->slot == le16_to_cpu(
  8904. pcie_device_pg0->Slot))) {
  8905. pcie_device->access_status =
  8906. pcie_device_pg0->AccessStatus;
  8907. pcie_device->responding = 1;
  8908. starget = pcie_device->starget;
  8909. if (starget && starget->hostdata) {
  8910. sas_target_priv_data = starget->hostdata;
  8911. sas_target_priv_data->tm_busy = 0;
  8912. sas_target_priv_data->deleted = 0;
  8913. } else
  8914. sas_target_priv_data = NULL;
  8915. if (starget) {
  8916. starget_printk(KERN_INFO, starget,
  8917. "handle(0x%04x), wwid(0x%016llx) ",
  8918. pcie_device->handle,
  8919. (unsigned long long)pcie_device->wwid);
  8920. if (pcie_device->enclosure_handle != 0)
  8921. starget_printk(KERN_INFO, starget,
  8922. "enclosure logical id(0x%016llx), "
  8923. "slot(%d)\n",
  8924. (unsigned long long)
  8925. pcie_device->enclosure_logical_id,
  8926. pcie_device->slot);
  8927. }
  8928. if (((le32_to_cpu(pcie_device_pg0->Flags)) &
  8929. MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
  8930. (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
  8931. pcie_device->enclosure_level =
  8932. pcie_device_pg0->EnclosureLevel;
  8933. memcpy(&pcie_device->connector_name[0],
  8934. &pcie_device_pg0->ConnectorName[0], 4);
  8935. } else {
  8936. pcie_device->enclosure_level = 0;
  8937. pcie_device->connector_name[0] = '\0';
  8938. }
  8939. if (pcie_device->handle == le16_to_cpu(
  8940. pcie_device_pg0->DevHandle))
  8941. goto out;
  8942. pr_info("\thandle changed from(0x%04x)!!!\n",
  8943. pcie_device->handle);
  8944. pcie_device->handle = le16_to_cpu(
  8945. pcie_device_pg0->DevHandle);
  8946. if (sas_target_priv_data)
  8947. sas_target_priv_data->handle =
  8948. le16_to_cpu(pcie_device_pg0->DevHandle);
  8949. goto out;
  8950. }
  8951. }
  8952. out:
  8953. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  8954. }
  8955. /**
  8956. * _scsih_search_responding_pcie_devices -
  8957. * @ioc: per adapter object
  8958. *
  8959. * After host reset, find out whether devices are still responding.
  8960. * If not remove.
  8961. */
  8962. static void
  8963. _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
  8964. {
  8965. Mpi26PCIeDevicePage0_t pcie_device_pg0;
  8966. Mpi2ConfigReply_t mpi_reply;
  8967. u16 ioc_status;
  8968. u16 handle;
  8969. u32 device_info;
  8970. ioc_info(ioc, "search for end-devices: start\n");
  8971. if (list_empty(&ioc->pcie_device_list))
  8972. goto out;
  8973. handle = 0xFFFF;
  8974. while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
  8975. &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  8976. handle))) {
  8977. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8978. MPI2_IOCSTATUS_MASK;
  8979. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8980. ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
  8981. __func__, ioc_status,
  8982. le32_to_cpu(mpi_reply.IOCLogInfo));
  8983. break;
  8984. }
  8985. handle = le16_to_cpu(pcie_device_pg0.DevHandle);
  8986. device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
  8987. if (!(_scsih_is_nvme_pciescsi_device(device_info)))
  8988. continue;
  8989. _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
  8990. }
  8991. out:
  8992. ioc_info(ioc, "search for PCIe end-devices: complete\n");
  8993. }
  8994. /**
  8995. * _scsih_mark_responding_raid_device - mark a raid_device as responding
  8996. * @ioc: per adapter object
  8997. * @wwid: world wide identifier for raid volume
  8998. * @handle: device handle
  8999. *
  9000. * After host reset, find out whether devices are still responding.
  9001. * Used in _scsih_remove_unresponsive_raid_devices.
  9002. */
  9003. static void
  9004. _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
  9005. u16 handle)
  9006. {
  9007. struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
  9008. struct scsi_target *starget;
  9009. struct _raid_device *raid_device;
  9010. unsigned long flags;
  9011. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  9012. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  9013. if (raid_device->wwid == wwid && raid_device->starget) {
  9014. starget = raid_device->starget;
  9015. if (starget && starget->hostdata) {
  9016. sas_target_priv_data = starget->hostdata;
  9017. sas_target_priv_data->deleted = 0;
  9018. } else
  9019. sas_target_priv_data = NULL;
  9020. raid_device->responding = 1;
  9021. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  9022. starget_printk(KERN_INFO, raid_device->starget,
  9023. "handle(0x%04x), wwid(0x%016llx)\n", handle,
  9024. (unsigned long long)raid_device->wwid);
  9025. /*
  9026. * WARPDRIVE: The handles of the PDs might have changed
  9027. * across the host reset so re-initialize the
  9028. * required data for Direct IO
  9029. */
  9030. mpt3sas_init_warpdrive_properties(ioc, raid_device);
  9031. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  9032. if (raid_device->handle == handle) {
  9033. spin_unlock_irqrestore(&ioc->raid_device_lock,
  9034. flags);
  9035. return;
  9036. }
  9037. pr_info("\thandle changed from(0x%04x)!!!\n",
  9038. raid_device->handle);
  9039. raid_device->handle = handle;
  9040. if (sas_target_priv_data)
  9041. sas_target_priv_data->handle = handle;
  9042. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  9043. return;
  9044. }
  9045. }
  9046. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  9047. }
  9048. /**
  9049. * _scsih_search_responding_raid_devices -
  9050. * @ioc: per adapter object
  9051. *
  9052. * After host reset, find out whether devices are still responding.
  9053. * If not remove.
  9054. */
  9055. static void
  9056. _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
  9057. {
  9058. Mpi2RaidVolPage1_t volume_pg1;
  9059. Mpi2RaidVolPage0_t volume_pg0;
  9060. Mpi2RaidPhysDiskPage0_t pd_pg0;
  9061. Mpi2ConfigReply_t mpi_reply;
  9062. u16 ioc_status;
  9063. u16 handle;
  9064. u8 phys_disk_num;
  9065. if (!ioc->ir_firmware)
  9066. return;
  9067. ioc_info(ioc, "search for raid volumes: start\n");
  9068. if (list_empty(&ioc->raid_device_list))
  9069. goto out;
  9070. handle = 0xFFFF;
  9071. while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
  9072. &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
  9073. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9074. MPI2_IOCSTATUS_MASK;
  9075. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  9076. break;
  9077. handle = le16_to_cpu(volume_pg1.DevHandle);
  9078. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
  9079. &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  9080. sizeof(Mpi2RaidVolPage0_t)))
  9081. continue;
  9082. if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
  9083. volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
  9084. volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
  9085. _scsih_mark_responding_raid_device(ioc,
  9086. le64_to_cpu(volume_pg1.WWID), handle);
  9087. }
  9088. /* refresh the pd_handles */
  9089. if (!ioc->is_warpdrive) {
  9090. phys_disk_num = 0xFF;
  9091. memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
  9092. while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
  9093. &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
  9094. phys_disk_num))) {
  9095. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9096. MPI2_IOCSTATUS_MASK;
  9097. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  9098. break;
  9099. phys_disk_num = pd_pg0.PhysDiskNum;
  9100. handle = le16_to_cpu(pd_pg0.DevHandle);
  9101. set_bit(handle, ioc->pd_handles);
  9102. }
  9103. }
  9104. out:
  9105. ioc_info(ioc, "search for responding raid volumes: complete\n");
  9106. }
  9107. /**
  9108. * _scsih_mark_responding_expander - mark a expander as responding
  9109. * @ioc: per adapter object
  9110. * @expander_pg0:SAS Expander Config Page0
  9111. *
  9112. * After host reset, find out whether devices are still responding.
  9113. * Used in _scsih_remove_unresponsive_expanders.
  9114. */
  9115. static void
  9116. _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
  9117. Mpi2ExpanderPage0_t *expander_pg0)
  9118. {
  9119. struct _sas_node *sas_expander = NULL;
  9120. unsigned long flags;
  9121. int i;
  9122. struct _enclosure_node *enclosure_dev = NULL;
  9123. u16 handle = le16_to_cpu(expander_pg0->DevHandle);
  9124. u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
  9125. u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
  9126. struct hba_port *port = mpt3sas_get_port_by_id(
  9127. ioc, expander_pg0->PhysicalPort, 0);
  9128. if (enclosure_handle)
  9129. enclosure_dev =
  9130. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  9131. enclosure_handle);
  9132. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  9133. list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
  9134. if (sas_expander->sas_address != sas_address)
  9135. continue;
  9136. if (sas_expander->port != port)
  9137. continue;
  9138. sas_expander->responding = 1;
  9139. if (enclosure_dev) {
  9140. sas_expander->enclosure_logical_id =
  9141. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  9142. sas_expander->enclosure_handle =
  9143. le16_to_cpu(expander_pg0->EnclosureHandle);
  9144. }
  9145. if (sas_expander->handle == handle)
  9146. goto out;
  9147. pr_info("\texpander(0x%016llx): handle changed" \
  9148. " from(0x%04x) to (0x%04x)!!!\n",
  9149. (unsigned long long)sas_expander->sas_address,
  9150. sas_expander->handle, handle);
  9151. sas_expander->handle = handle;
  9152. for (i = 0 ; i < sas_expander->num_phys ; i++)
  9153. sas_expander->phy[i].handle = handle;
  9154. goto out;
  9155. }
  9156. out:
  9157. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  9158. }
  9159. /**
  9160. * _scsih_search_responding_expanders -
  9161. * @ioc: per adapter object
  9162. *
  9163. * After host reset, find out whether devices are still responding.
  9164. * If not remove.
  9165. */
  9166. static void
  9167. _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
  9168. {
  9169. Mpi2ExpanderPage0_t expander_pg0;
  9170. Mpi2ConfigReply_t mpi_reply;
  9171. u16 ioc_status;
  9172. u64 sas_address;
  9173. u16 handle;
  9174. u8 port;
  9175. ioc_info(ioc, "search for expanders: start\n");
  9176. if (list_empty(&ioc->sas_expander_list))
  9177. goto out;
  9178. handle = 0xFFFF;
  9179. while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
  9180. MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
  9181. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9182. MPI2_IOCSTATUS_MASK;
  9183. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  9184. break;
  9185. handle = le16_to_cpu(expander_pg0.DevHandle);
  9186. sas_address = le64_to_cpu(expander_pg0.SASAddress);
  9187. port = expander_pg0.PhysicalPort;
  9188. pr_info(
  9189. "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
  9190. handle, (unsigned long long)sas_address,
  9191. (ioc->multipath_on_hba ?
  9192. port : MULTIPATH_DISABLED_PORT_ID));
  9193. _scsih_mark_responding_expander(ioc, &expander_pg0);
  9194. }
  9195. out:
  9196. ioc_info(ioc, "search for expanders: complete\n");
  9197. }
  9198. /**
  9199. * _scsih_remove_unresponding_devices - removing unresponding devices
  9200. * @ioc: per adapter object
  9201. */
  9202. static void
  9203. _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
  9204. {
  9205. struct _sas_device *sas_device, *sas_device_next;
  9206. struct _sas_node *sas_expander, *sas_expander_next;
  9207. struct _raid_device *raid_device, *raid_device_next;
  9208. struct _pcie_device *pcie_device, *pcie_device_next;
  9209. struct list_head tmp_list;
  9210. unsigned long flags;
  9211. LIST_HEAD(head);
  9212. ioc_info(ioc, "removing unresponding devices: start\n");
  9213. /* removing unresponding end devices */
  9214. ioc_info(ioc, "removing unresponding devices: end-devices\n");
  9215. /*
  9216. * Iterate, pulling off devices marked as non-responding. We become the
  9217. * owner for the reference the list had on any object we prune.
  9218. */
  9219. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  9220. /*
  9221. * Clean up the sas_device_init_list list as
  9222. * driver goes for fresh scan as part of diag reset.
  9223. */
  9224. list_for_each_entry_safe(sas_device, sas_device_next,
  9225. &ioc->sas_device_init_list, list) {
  9226. list_del_init(&sas_device->list);
  9227. sas_device_put(sas_device);
  9228. }
  9229. list_for_each_entry_safe(sas_device, sas_device_next,
  9230. &ioc->sas_device_list, list) {
  9231. if (!sas_device->responding)
  9232. list_move_tail(&sas_device->list, &head);
  9233. else
  9234. sas_device->responding = 0;
  9235. }
  9236. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  9237. /*
  9238. * Now, uninitialize and remove the unresponding devices we pruned.
  9239. */
  9240. list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
  9241. _scsih_remove_device(ioc, sas_device);
  9242. list_del_init(&sas_device->list);
  9243. sas_device_put(sas_device);
  9244. }
  9245. ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
  9246. INIT_LIST_HEAD(&head);
  9247. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  9248. /*
  9249. * Clean up the pcie_device_init_list list as
  9250. * driver goes for fresh scan as part of diag reset.
  9251. */
  9252. list_for_each_entry_safe(pcie_device, pcie_device_next,
  9253. &ioc->pcie_device_init_list, list) {
  9254. list_del_init(&pcie_device->list);
  9255. pcie_device_put(pcie_device);
  9256. }
  9257. list_for_each_entry_safe(pcie_device, pcie_device_next,
  9258. &ioc->pcie_device_list, list) {
  9259. if (!pcie_device->responding)
  9260. list_move_tail(&pcie_device->list, &head);
  9261. else
  9262. pcie_device->responding = 0;
  9263. }
  9264. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  9265. list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
  9266. _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
  9267. list_del_init(&pcie_device->list);
  9268. pcie_device_put(pcie_device);
  9269. }
  9270. /* removing unresponding volumes */
  9271. if (ioc->ir_firmware) {
  9272. ioc_info(ioc, "removing unresponding devices: volumes\n");
  9273. list_for_each_entry_safe(raid_device, raid_device_next,
  9274. &ioc->raid_device_list, list) {
  9275. if (!raid_device->responding)
  9276. _scsih_sas_volume_delete(ioc,
  9277. raid_device->handle);
  9278. else
  9279. raid_device->responding = 0;
  9280. }
  9281. }
  9282. /* removing unresponding expanders */
  9283. ioc_info(ioc, "removing unresponding devices: expanders\n");
  9284. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  9285. INIT_LIST_HEAD(&tmp_list);
  9286. list_for_each_entry_safe(sas_expander, sas_expander_next,
  9287. &ioc->sas_expander_list, list) {
  9288. if (!sas_expander->responding)
  9289. list_move_tail(&sas_expander->list, &tmp_list);
  9290. else
  9291. sas_expander->responding = 0;
  9292. }
  9293. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  9294. list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
  9295. list) {
  9296. _scsih_expander_node_remove(ioc, sas_expander);
  9297. }
  9298. ioc_info(ioc, "removing unresponding devices: complete\n");
  9299. /* unblock devices */
  9300. _scsih_ublock_io_all_device(ioc);
  9301. }
  9302. static void
  9303. _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
  9304. struct _sas_node *sas_expander, u16 handle)
  9305. {
  9306. Mpi2ExpanderPage1_t expander_pg1;
  9307. Mpi2ConfigReply_t mpi_reply;
  9308. int i;
  9309. for (i = 0 ; i < sas_expander->num_phys ; i++) {
  9310. if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
  9311. &expander_pg1, i, handle))) {
  9312. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  9313. __FILE__, __LINE__, __func__);
  9314. return;
  9315. }
  9316. mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
  9317. le16_to_cpu(expander_pg1.AttachedDevHandle), i,
  9318. expander_pg1.NegotiatedLinkRate >> 4,
  9319. sas_expander->port);
  9320. }
  9321. }
  9322. /**
  9323. * _scsih_scan_for_devices_after_reset - scan for devices after host reset
  9324. * @ioc: per adapter object
  9325. */
  9326. static void
  9327. _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
  9328. {
  9329. Mpi2ExpanderPage0_t expander_pg0;
  9330. Mpi2SasDevicePage0_t sas_device_pg0;
  9331. Mpi26PCIeDevicePage0_t pcie_device_pg0;
  9332. Mpi2RaidVolPage1_t *volume_pg1;
  9333. Mpi2RaidVolPage0_t *volume_pg0;
  9334. Mpi2RaidPhysDiskPage0_t pd_pg0;
  9335. Mpi2EventIrConfigElement_t element;
  9336. Mpi2ConfigReply_t mpi_reply;
  9337. u8 phys_disk_num, port_id;
  9338. u16 ioc_status;
  9339. u16 handle, parent_handle;
  9340. u64 sas_address;
  9341. struct _sas_device *sas_device;
  9342. struct _pcie_device *pcie_device;
  9343. struct _sas_node *expander_device;
  9344. static struct _raid_device *raid_device;
  9345. u8 retry_count;
  9346. unsigned long flags;
  9347. volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
  9348. if (!volume_pg0)
  9349. return;
  9350. volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
  9351. if (!volume_pg1) {
  9352. kfree(volume_pg0);
  9353. return;
  9354. }
  9355. ioc_info(ioc, "scan devices: start\n");
  9356. _scsih_sas_host_refresh(ioc);
  9357. ioc_info(ioc, "\tscan devices: expanders start\n");
  9358. /* expanders */
  9359. handle = 0xFFFF;
  9360. while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
  9361. MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
  9362. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9363. MPI2_IOCSTATUS_MASK;
  9364. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  9365. ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
  9366. ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
  9367. break;
  9368. }
  9369. handle = le16_to_cpu(expander_pg0.DevHandle);
  9370. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  9371. port_id = expander_pg0.PhysicalPort;
  9372. expander_device = mpt3sas_scsih_expander_find_by_sas_address(
  9373. ioc, le64_to_cpu(expander_pg0.SASAddress),
  9374. mpt3sas_get_port_by_id(ioc, port_id, 0));
  9375. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  9376. if (expander_device)
  9377. _scsih_refresh_expander_links(ioc, expander_device,
  9378. handle);
  9379. else {
  9380. ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
  9381. handle,
  9382. (u64)le64_to_cpu(expander_pg0.SASAddress));
  9383. _scsih_expander_add(ioc, handle);
  9384. ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
  9385. handle,
  9386. (u64)le64_to_cpu(expander_pg0.SASAddress));
  9387. }
  9388. }
  9389. ioc_info(ioc, "\tscan devices: expanders complete\n");
  9390. if (!ioc->ir_firmware)
  9391. goto skip_to_sas;
  9392. ioc_info(ioc, "\tscan devices: phys disk start\n");
  9393. /* phys disk */
  9394. phys_disk_num = 0xFF;
  9395. while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
  9396. &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
  9397. phys_disk_num))) {
  9398. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9399. MPI2_IOCSTATUS_MASK;
  9400. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  9401. ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
  9402. ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
  9403. break;
  9404. }
  9405. phys_disk_num = pd_pg0.PhysDiskNum;
  9406. handle = le16_to_cpu(pd_pg0.DevHandle);
  9407. sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
  9408. if (sas_device) {
  9409. sas_device_put(sas_device);
  9410. continue;
  9411. }
  9412. if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  9413. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
  9414. handle) != 0)
  9415. continue;
  9416. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9417. MPI2_IOCSTATUS_MASK;
  9418. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  9419. ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
  9420. ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
  9421. break;
  9422. }
  9423. parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
  9424. if (!_scsih_get_sas_address(ioc, parent_handle,
  9425. &sas_address)) {
  9426. ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
  9427. handle,
  9428. (u64)le64_to_cpu(sas_device_pg0.SASAddress));
  9429. port_id = sas_device_pg0.PhysicalPort;
  9430. mpt3sas_transport_update_links(ioc, sas_address,
  9431. handle, sas_device_pg0.PhyNum,
  9432. MPI2_SAS_NEG_LINK_RATE_1_5,
  9433. mpt3sas_get_port_by_id(ioc, port_id, 0));
  9434. set_bit(handle, ioc->pd_handles);
  9435. retry_count = 0;
  9436. /* This will retry adding the end device.
  9437. * _scsih_add_device() will decide on retries and
  9438. * return "1" when it should be retried
  9439. */
  9440. while (_scsih_add_device(ioc, handle, retry_count++,
  9441. 1)) {
  9442. ssleep(1);
  9443. }
  9444. ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
  9445. handle,
  9446. (u64)le64_to_cpu(sas_device_pg0.SASAddress));
  9447. }
  9448. }
  9449. ioc_info(ioc, "\tscan devices: phys disk complete\n");
  9450. ioc_info(ioc, "\tscan devices: volumes start\n");
  9451. /* volumes */
  9452. handle = 0xFFFF;
  9453. while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
  9454. volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
  9455. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9456. MPI2_IOCSTATUS_MASK;
  9457. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  9458. ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
  9459. ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
  9460. break;
  9461. }
  9462. handle = le16_to_cpu(volume_pg1->DevHandle);
  9463. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  9464. raid_device = _scsih_raid_device_find_by_wwid(ioc,
  9465. le64_to_cpu(volume_pg1->WWID));
  9466. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  9467. if (raid_device)
  9468. continue;
  9469. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
  9470. volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  9471. sizeof(Mpi2RaidVolPage0_t)))
  9472. continue;
  9473. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9474. MPI2_IOCSTATUS_MASK;
  9475. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  9476. ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
  9477. ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
  9478. break;
  9479. }
  9480. if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
  9481. volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
  9482. volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
  9483. memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
  9484. element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
  9485. element.VolDevHandle = volume_pg1->DevHandle;
  9486. ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
  9487. volume_pg1->DevHandle);
  9488. _scsih_sas_volume_add(ioc, &element);
  9489. ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
  9490. volume_pg1->DevHandle);
  9491. }
  9492. }
  9493. ioc_info(ioc, "\tscan devices: volumes complete\n");
  9494. skip_to_sas:
  9495. ioc_info(ioc, "\tscan devices: end devices start\n");
  9496. /* sas devices */
  9497. handle = 0xFFFF;
  9498. while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  9499. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  9500. handle))) {
  9501. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  9502. MPI2_IOCSTATUS_MASK;
  9503. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  9504. ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
  9505. ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
  9506. break;
  9507. }
  9508. handle = le16_to_cpu(sas_device_pg0.DevHandle);
  9509. if (!(_scsih_is_end_device(
  9510. le32_to_cpu(sas_device_pg0.DeviceInfo))))
  9511. continue;
  9512. port_id = sas_device_pg0.PhysicalPort;
  9513. sas_device = mpt3sas_get_sdev_by_addr(ioc,
  9514. le64_to_cpu(sas_device_pg0.SASAddress),
  9515. mpt3sas_get_port_by_id(ioc, port_id, 0));
  9516. if (sas_device) {
  9517. sas_device_put(sas_device);
  9518. continue;
  9519. }
  9520. parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
  9521. if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
  9522. ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
  9523. handle,
  9524. (u64)le64_to_cpu(sas_device_pg0.SASAddress));
  9525. mpt3sas_transport_update_links(ioc, sas_address, handle,
  9526. sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
  9527. mpt3sas_get_port_by_id(ioc, port_id, 0));
  9528. retry_count = 0;
  9529. /* This will retry adding the end device.
  9530. * _scsih_add_device() will decide on retries and
  9531. * return "1" when it should be retried
  9532. */
  9533. while (_scsih_add_device(ioc, handle, retry_count++,
  9534. 0)) {
  9535. ssleep(1);
  9536. }
  9537. ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
  9538. handle,
  9539. (u64)le64_to_cpu(sas_device_pg0.SASAddress));
  9540. }
  9541. }
  9542. ioc_info(ioc, "\tscan devices: end devices complete\n");
  9543. ioc_info(ioc, "\tscan devices: pcie end devices start\n");
  9544. /* pcie devices */
  9545. handle = 0xFFFF;
  9546. while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
  9547. &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  9548. handle))) {
  9549. ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
  9550. & MPI2_IOCSTATUS_MASK;
  9551. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  9552. ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
  9553. ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
  9554. break;
  9555. }
  9556. handle = le16_to_cpu(pcie_device_pg0.DevHandle);
  9557. if (!(_scsih_is_nvme_pciescsi_device(
  9558. le32_to_cpu(pcie_device_pg0.DeviceInfo))))
  9559. continue;
  9560. pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
  9561. le64_to_cpu(pcie_device_pg0.WWID));
  9562. if (pcie_device) {
  9563. pcie_device_put(pcie_device);
  9564. continue;
  9565. }
  9566. retry_count = 0;
  9567. parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
  9568. _scsih_pcie_add_device(ioc, handle);
  9569. ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
  9570. handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
  9571. }
  9572. kfree(volume_pg0);
  9573. kfree(volume_pg1);
  9574. ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
  9575. ioc_info(ioc, "scan devices: complete\n");
  9576. }
/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 *
 * Currently the scsih layer needs no pre-reset work; this hook only
 * emits a task-management debug trace so reset sequencing is visible
 * in the logs when TM debugging is enabled.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *	scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Called on the reset path: any internal command that was in flight
 * when the reset hit is marked as reset-aborted, its smid is released,
 * and its waiter is woken.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));

	/* Fail back a pending internal scsih command: MPT3_CMD_RESET lets
	 * the waiter distinguish "aborted by reset" from a normal reply.
	 */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* Same treatment for an outstanding task-management command. */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* Device add/remove operations that were in progress are stale
	 * after a reset; clear the tracking bitmaps so the post-reset
	 * rediscovery starts from a clean state.
	 */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);

	/* Drain queued firmware events, then flush commands still
	 * outstanding at the SCSI midlayer.
	 */
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
  9615. /**
  9616. * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
  9617. * @ioc: per adapter object
  9618. *
  9619. * The handler for doing any required cleanup or initialization.
  9620. */
  9621. void
  9622. mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
  9623. {
  9624. dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
  9625. if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
  9626. if (ioc->multipath_on_hba) {
  9627. _scsih_sas_port_refresh(ioc);
  9628. _scsih_update_vphys_after_reset(ioc);
  9629. }
  9630. _scsih_prep_device_scan(ioc);
  9631. _scsih_create_enclosure_list_after_reset(ioc);
  9632. _scsih_search_responding_sas_devices(ioc);
  9633. _scsih_search_responding_pcie_devices(ioc);
  9634. _scsih_search_responding_raid_devices(ioc);
  9635. _scsih_search_responding_expanders(ioc);
  9636. _scsih_error_recovery_delete_devices(ioc);
  9637. }
  9638. }
/**
 * _mpt3sas_fw_work - delayed task for processing firmware events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Dispatches one queued firmware event to its handler.  The function
 * owns one reference on @fw_event and drops it on every exit path
 * except MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST, whose handler manages
 * the reference itself (see the early return in that case).
 */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	ioc->current_event = fw_event;
	_scsih_fw_event_del_from_list(ioc, fw_event);

	/* the queue is being flushed so ignore this event */
	if (ioc->remove_host || ioc->pci_error_recovery) {
		fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc,
			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
			fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		/* Wait for any in-flight host recovery to finish before
		 * pruning devices that did not respond after reset.
		 */
		while (scsi_host_in_recovery(ioc->shost) ||
					 ioc->shost_recovery) {
			/*
			 * If we're unloading or cancelling the work, bail.
			 * Otherwise, this can become an infinite loop.
			 */
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		_scsih_remove_unresponding_devices(ioc);
		_scsih_del_dirty_vphy(ioc);
		_scsih_del_dirty_port_entries(ioc);
		if (ioc->is_gen35_ioc)
			_scsih_update_device_qdepth(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		/*
		 * If diag reset has occurred during the driver load
		 * then driver has to complete the driver load operation
		 * by executing the following items:
		 *- Register the devices from sas_device_init_list to SML
		 *- clear is_driver_loading flag,
		 *- start the watchdog thread.
		 * In happy driver load path, above things are taken care of when
		 * driver executes scsih_scan_finished().
		 */
		if (ioc->is_driver_loading)
			_scsih_complete_devices_scanning(ioc);
		_scsih_set_nvme_max_shutdown_latency(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		/* missing_delay is a module parameter pair; -1 means
		 * "leave the firmware default alone".
		 */
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc,
			   ioc_info(ioc, "port enable: complete from worker thread\n"));
		break;
	case MPT3SAS_TURN_ON_PFA_LED:
		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_sas_topology_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
			_scsih_sas_device_status_change_event_debug(ioc,
			    (Mpi2EventDataSasDeviceStatusChange_t *)
			    fw_event->event_data);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		_scsih_sas_device_discovery_error_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		_scsih_sas_ir_operation_status_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		_scsih_pcie_device_status_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
		_scsih_pcie_enumeration_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_pcie_topology_change_event(ioc, fw_event);
		/* Deliberate early return without fw_event_work_put():
		 * the topology-change handler is responsible for the
		 * reference on this path.
		 */
		ioc->current_event = NULL;
		return;
	}
out:
	fw_event_work_put(fw_event);
	ioc->current_event = NULL;
}
  9753. /**
  9754. * _firmware_event_work
  9755. * @work: The fw_event_work object
  9756. * Context: user.
  9757. *
  9758. * wrappers for the work thread handling firmware events
  9759. */
  9760. static void
  9761. _firmware_event_work(struct work_struct *work)
  9762. {
  9763. struct fw_event_work *fw_event = container_of(work,
  9764. struct fw_event_work, work);
  9765. _mpt3sas_fw_work(fw_event->ioc, fw_event);
  9766. }
/**
 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Events that need no deferred processing are filtered here (the
 * "return 1" paths) so no work item is queued for them.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	0 means the mf is freed from this function.
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	/* Feed the diag-trigger machinery for everything except log
	 * entries (handled separately below).
	 */
	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* Collapse bursts of AENs: remember that more arrived
		 * while one is being serviced instead of queueing each.
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		/*
		 * No need to add the topology change list
		 * event to fw event work queue when
		 * diag reset is going on. Since during diag
		 * reset driver scan the devices by reading
		 * sas device page0's not by processing the
		 * events.
		 */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 log_code;

		/* WarpDrive-specific log decoding only. */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (log_code) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
			(Mpi2EventDataTemperature_t *)
			mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
				  le32_to_cpu(
				  ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* Queue a deferred work item carrying a copy of the event data;
	 * EventDataLength is in 32-bit words.
	 */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
	return 1;
}
/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.  Child end devices and child expanders are
 * removed first, then the expander's own transport port, and finally
 * the node itself is unlinked and freed.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;
	int port_id;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	   &sas_expander->sas_port_list, port_list) {
		/* Bail out mid-teardown if a host reset starts; the
		 * post-reset rescan will reconcile the remaining state.
		 */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* Recursively tears down the child expander. */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	/* Capture the port id before the port object is torn down,
	 * so it can still be logged below.
	 */
	port_id = sas_expander->port->port_id;

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);
	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    port_id);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}
  9983. /**
  9984. * _scsih_nvme_shutdown - NVMe shutdown notification
  9985. * @ioc: per adapter object
  9986. *
  9987. * Sending IoUnitControl request with shutdown operation code to alert IOC that
  9988. * the host system is shutting down so that IOC can issue NVMe shutdown to
  9989. * NVMe drives attached to it.
  9990. */
  9991. static void
  9992. _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
  9993. {
  9994. Mpi26IoUnitControlRequest_t *mpi_request;
  9995. Mpi26IoUnitControlReply_t *mpi_reply;
  9996. u16 smid;
  9997. /* are there any NVMe devices ? */
  9998. if (list_empty(&ioc->pcie_device_list))
  9999. return;
  10000. mutex_lock(&ioc->scsih_cmds.mutex);
  10001. if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
  10002. ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
  10003. goto out;
  10004. }
  10005. ioc->scsih_cmds.status = MPT3_CMD_PENDING;
  10006. smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
  10007. if (!smid) {
  10008. ioc_err(ioc,
  10009. "%s: failed obtaining a smid\n", __func__);
  10010. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  10011. goto out;
  10012. }
  10013. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  10014. ioc->scsih_cmds.smid = smid;
  10015. memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
  10016. mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
  10017. mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
  10018. init_completion(&ioc->scsih_cmds.done);
  10019. ioc->put_smid_default(ioc, smid);
  10020. /* Wait for max_shutdown_latency seconds */
  10021. ioc_info(ioc,
  10022. "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
  10023. ioc->max_shutdown_latency);
  10024. wait_for_completion_timeout(&ioc->scsih_cmds.done,
  10025. ioc->max_shutdown_latency*HZ);
  10026. if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
  10027. ioc_err(ioc, "%s: timeout\n", __func__);
  10028. goto out;
  10029. }
  10030. if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
  10031. mpi_reply = ioc->scsih_cmds.reply;
  10032. ioc_info(ioc, "Io Unit Control shutdown (complete):"
  10033. "ioc_status(0x%04x), loginfo(0x%08x)\n",
  10034. le16_to_cpu(mpi_reply->IOCStatus),
  10035. le32_to_cpu(mpi_reply->IOCLogInfo));
  10036. }
  10037. out:
  10038. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  10039. mutex_unlock(&ioc->scsih_cmds.mutex);
  10040. }
  10041. /**
  10042. * _scsih_ir_shutdown - IR shutdown notification
  10043. * @ioc: per adapter object
  10044. *
  10045. * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
  10046. * the host system is shutting down.
  10047. */
  10048. static void
  10049. _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
  10050. {
  10051. Mpi2RaidActionRequest_t *mpi_request;
  10052. Mpi2RaidActionReply_t *mpi_reply;
  10053. u16 smid;
  10054. /* is IR firmware build loaded ? */
  10055. if (!ioc->ir_firmware)
  10056. return;
  10057. /* are there any volumes ? */
  10058. if (list_empty(&ioc->raid_device_list))
  10059. return;
  10060. mutex_lock(&ioc->scsih_cmds.mutex);
  10061. if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
  10062. ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
  10063. goto out;
  10064. }
  10065. ioc->scsih_cmds.status = MPT3_CMD_PENDING;
  10066. smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
  10067. if (!smid) {
  10068. ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
  10069. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  10070. goto out;
  10071. }
  10072. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  10073. ioc->scsih_cmds.smid = smid;
  10074. memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
  10075. mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
  10076. mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
  10077. if (!ioc->hide_ir_msg)
  10078. ioc_info(ioc, "IR shutdown (sending)\n");
  10079. init_completion(&ioc->scsih_cmds.done);
  10080. ioc->put_smid_default(ioc, smid);
  10081. wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
  10082. if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
  10083. ioc_err(ioc, "%s: timeout\n", __func__);
  10084. goto out;
  10085. }
  10086. if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
  10087. mpi_reply = ioc->scsih_cmds.reply;
  10088. if (!ioc->hide_ir_msg)
  10089. ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
  10090. le16_to_cpu(mpi_reply->IOCStatus),
  10091. le32_to_cpu(mpi_reply->IOCLogInfo));
  10092. }
  10093. out:
  10094. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  10095. mutex_unlock(&ioc->scsih_cmds.mutex);
  10096. }
  10097. /**
  10098. * _scsih_get_shost_and_ioc - get shost and ioc
  10099. * and verify whether they are NULL or not
  10100. * @pdev: PCI device struct
  10101. * @shost: address of scsi host pointer
  10102. * @ioc: address of HBA adapter pointer
  10103. *
  10104. * Return zero if *shost and *ioc are not NULL otherwise return error number.
  10105. */
  10106. static int
  10107. _scsih_get_shost_and_ioc(struct pci_dev *pdev,
  10108. struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
  10109. {
  10110. *shost = pci_get_drvdata(pdev);
  10111. if (*shost == NULL) {
  10112. dev_err(&pdev->dev, "pdev's driver data is null\n");
  10113. return -ENXIO;
  10114. }
  10115. *ioc = shost_priv(*shost);
  10116. if (*ioc == NULL) {
  10117. dev_err(&pdev->dev, "shost's private data is null\n");
  10118. return -ENXIO;
  10119. }
  10120. return 0;
  10121. }
/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 * Routine called when unloading the driver.  Tears down, in order:
 * the firmware-event workqueue, RAID volumes, PCIe devices, SAS
 * ports/expanders, hba_port entries, host phys, and finally the base
 * layer and the Scsi_Host reference.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* If the device is gone (surprise removal), fail outstanding
	 * commands immediately instead of waiting on dead hardware.
	 */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach the event workqueue under the lock so no new events
	 * are queued, then destroy it outside the lock.
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			/* Mark deleted so no new I/O is accepted while the
			 * target is being removed.
			 */
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
	    list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	mpt3sas_base_detach(ioc);
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}
/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 *
 * Quiesces the adapter for power-off: drains firmware events, restores
 * ioc page1 on Aero controllers, notifies IR and NVMe subsystems of the
 * shutdown, then soft-resets the IOC and releases its interrupts.
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* Device already gone (surprise removal): fail outstanding
	 * commands instead of waiting on dead hardware.
	 */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/* Detach the event workqueue under the lock, destroy it after. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_stop_watchdog(ioc);
	/* Hold shost_recovery across the soft reset so other paths see
	 * the controller as unavailable while it resets.
	 */
	ioc->shost_recovery = 1;
	mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);
}
/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes. Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	device = NULL;
	/*
	 * Selection priority: requested boot device, then requested
	 * alternate, then the current boot device.  @channel tells us
	 * which union member @device actually points to.
	 */
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* Move to the active list under the lock; keep the target id
		 * so we can register it after the lock is dropped. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/* Snapshot addressing info under the lock, then move the
		 * device onto the active list. */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		/* WarpDrive-style configs keep member disks hidden from SML. */
		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/* Port added but no scsi target appeared: undo the
			 * transport registration unless we are still in the
			 * initial load scan (removal would oops there). */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
  10360. /**
  10361. * _scsih_probe_raid - reporting raid volumes to scsi-ml
  10362. * @ioc: per adapter object
  10363. *
  10364. * Called during initial loading of the driver.
  10365. */
  10366. static void
  10367. _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
  10368. {
  10369. struct _raid_device *raid_device, *raid_next;
  10370. int rc;
  10371. list_for_each_entry_safe(raid_device, raid_next,
  10372. &ioc->raid_device_list, list) {
  10373. if (raid_device->starget)
  10374. continue;
  10375. rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
  10376. raid_device->id, 0);
  10377. if (rc)
  10378. _scsih_raid_device_remove(ioc, raid_device);
  10379. }
  10380. }
  10381. static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
  10382. {
  10383. struct _sas_device *sas_device = NULL;
  10384. unsigned long flags;
  10385. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  10386. if (!list_empty(&ioc->sas_device_init_list)) {
  10387. sas_device = list_first_entry(&ioc->sas_device_init_list,
  10388. struct _sas_device, list);
  10389. sas_device_get(sas_device);
  10390. }
  10391. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  10392. return sas_device;
  10393. }
/*
 * sas_device_make_active - move a sas device onto the active device list.
 * @ioc: per adapter object
 * @sas_device: device to re-home onto ioc->sas_device_list
 *
 * Transfers the list's reference from whatever list the device was on
 * (normally sas_device_init_list) to sas_device_list, all under
 * sas_device_lock.
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.  Drains
 * sas_device_init_list: each device is registered with the SAS
 * transport layer and, on success, moved to the active device list.
 */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	/* Hidden-drive configs (e.g. WarpDrive) expose nothing to SML. */
	if (ioc->hide_drives)
		return;

	/* get_next_sas_device() returns a referenced entry; every exit
	 * path from this loop body must drop that reference. */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent, sas_device->port)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When asyn scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent,
				    sas_device->port);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		/* Registration succeeded: promote to the active list. */
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
  10454. /**
  10455. * get_next_pcie_device - Get the next pcie device
  10456. * @ioc: per adapter object
  10457. *
  10458. * Get the next pcie device from pcie_device_init_list list.
  10459. *
  10460. * Return: pcie device structure if pcie_device_init_list list is not empty
  10461. * otherwise returns NULL
  10462. */
  10463. static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
  10464. {
  10465. struct _pcie_device *pcie_device = NULL;
  10466. unsigned long flags;
  10467. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  10468. if (!list_empty(&ioc->pcie_device_init_list)) {
  10469. pcie_device = list_first_entry(&ioc->pcie_device_init_list,
  10470. struct _pcie_device, list);
  10471. pcie_device_get(pcie_device);
  10472. }
  10473. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  10474. return pcie_device;
  10475. }
/**
 * pcie_device_make_active - Add pcie device to pcie_device_list list
 * @ioc: per adapter object
 * @pcie_device: pcie device object
 *
 * Add the pcie device which has registered with SCSI Transport Later to
 * pcie_device_list list.  The list's reference is transferred from the
 * old list (if any) to pcie_device_list under pcie_device_lock.
 */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/* Drop the old list's reference if the device is still linked;
	 * then take a fresh one for the destination list. */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.  Drains
 * pcie_device_init_list: each device is registered with the midlayer
 * and, on success, moved to the active pcie_device_list.
 */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* PCIe Device List.  get_next_pcie_device() returns a referenced
	 * entry; every exit path from the loop body drops that reference. */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		/* Already registered with the midlayer - just re-home it. */
		if (pcie_device->starget) {
			pcie_device_put(pcie_device);
			continue;
		}
		/* Blocked devices are tracked but not exposed to SML. */
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
		    pcie_device->id, 0);
		if (rc) {
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * When async scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				/* TODO-- Need to find out whether this condition will
				 * occur or not
				 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		/* Registration succeeded: promote to the active list. */
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}
  10546. /**
  10547. * _scsih_probe_devices - probing for devices
  10548. * @ioc: per adapter object
  10549. *
  10550. * Called during initial loading of the driver.
  10551. */
  10552. static void
  10553. _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
  10554. {
  10555. u16 volume_mapping_flags;
  10556. if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
  10557. return; /* return when IOC doesn't support initiator mode */
  10558. _scsih_probe_boot_devices(ioc);
  10559. if (ioc->ir_firmware) {
  10560. volume_mapping_flags =
  10561. le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
  10562. MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
  10563. if (volume_mapping_flags ==
  10564. MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
  10565. _scsih_probe_raid(ioc);
  10566. _scsih_probe_sas(ioc);
  10567. } else {
  10568. _scsih_probe_sas(ioc);
  10569. _scsih_probe_raid(ioc);
  10570. }
  10571. } else {
  10572. _scsih_probe_sas(ioc);
  10573. _scsih_probe_pcie(ioc);
  10574. }
  10575. }
  10576. /**
  10577. * scsih_scan_start - scsi lld callback for .scan_start
  10578. * @shost: SCSI host pointer
  10579. *
  10580. * The shost has the ability to discover targets on its own instead
  10581. * of scanning the entire bus. In our implemention, we will kick off
  10582. * firmware discovery.
  10583. */
  10584. static void
  10585. scsih_scan_start(struct Scsi_Host *shost)
  10586. {
  10587. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  10588. int rc;
  10589. if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
  10590. mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
  10591. else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
  10592. mpt3sas_enable_diag_buffer(ioc, 1);
  10593. if (disable_discovery > 0)
  10594. return;
  10595. ioc->start_scan = 1;
  10596. rc = mpt3sas_port_enable(ioc);
  10597. if (rc != 0)
  10598. ioc_info(ioc, "port enable: FAILED\n");
  10599. }
  10600. /**
  10601. * _scsih_complete_devices_scanning - add the devices to sml and
  10602. * complete ioc initialization.
  10603. * @ioc: per adapter object
  10604. *
  10605. * Return nothing.
  10606. */
  10607. static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
  10608. {
  10609. if (ioc->wait_for_discovery_to_complete) {
  10610. ioc->wait_for_discovery_to_complete = 0;
  10611. _scsih_probe_devices(ioc);
  10612. }
  10613. mpt3sas_base_start_watchdog(ioc);
  10614. ioc->is_driver_loading = 0;
  10615. }
/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodicallyn until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
 * we wait for firmware discovery to complete, then return 1.
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	/* Discovery disabled via module parameter: finish immediately. */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* Give port enable at most 300 seconds, then give up. */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/*
	 * start_scan is still set: port enable has not completed yet.
	 * Check the IOC state; a fault or coredump means the command will
	 * never finish, so schedule a reset instead of polling forever.
	 */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		/* Not done yet - ask the midlayer to poll again. */
		return 0;
	}

	/* A diag reset aborted the port enable command. */
	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* Port enable completed but the firmware reported failure. */
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
		    ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

 out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}
/**
 * scsih_map_queues - map reply queues with request queues
 * @shost: SCSI host pointer
 *
 * Splits the adapter's reply queues between the blk-mq maps: the
 * default map gets the interrupt-driven MSI-X queues (minus any
 * reserved high-IOPS queues), and the poll map gets the IRQ-less
 * polled queues.
 */
static void scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct blk_mq_queue_map *map;
	int i, qoff, offset;
	/* Queues below iopoll_q_start_index use MSI-X; the rest are polled. */
	int nr_msix_vectors = ioc->iopoll_q_start_index;
	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;

	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		map = &shost->tag_set.map[i];
		map->nr_queues = 0;
		offset = 0;
		if (i == HCTX_TYPE_DEFAULT) {
			/* High-IOPS queues are excluded from the default map;
			 * skip past them when mapping against the PCI device. */
			map->nr_queues =
			    nr_msix_vectors - ioc->high_iops_queues;
			offset = ioc->high_iops_queues;
		} else if (i == HCTX_TYPE_POLL)
			map->nr_queues = iopoll_q_count;

		/* The default map must never end up empty. */
		if (!map->nr_queues)
			BUG_ON(i == HCTX_TYPE_DEFAULT);

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, ioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
/* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	.can_queue			= 1,	/* placeholder; real depth set at attach time */
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	.cookie		= &mpt2sas_driver_template,	/* matches hosts using the SAS 2.0 template */
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
/* shost template for SAS 3.0 HBA devices */
static struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	.can_queue			= 1,	/* placeholder; real depth set at attach time */
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 128,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* blk-mq queue mapping and polled-I/O support (SAS 3.0+ only) */
	.map_queues			= scsih_map_queues,
	.mq_poll			= mpt3sas_blk_mq_poll,
};
/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	.cookie		= &mpt3sas_driver_template,	/* matches hosts using the SAS 3.0 template */
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 * this device belongs to.
 * @pdev: PCI device struct
 *
 * return MPI2_VERSION for SAS 2.0 HBA devices,
 * MPI25_VERSION for SAS 3.0 HBA devices, and
 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
 * (0 for any PCI device id not recognized by this driver).
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{
	switch (pdev->device) {
	/* SAS 2.0 generation (MPI 2.0) */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation (MPI 2.5) */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* Later SAS 3.x generations (MPI 2.6), incl. secure/invalid
	 * variants which are rejected later in _scsih_probe(). */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	return 0;
}
  10867. /**
  10868. * _scsih_probe - attach and add scsi host
  10869. * @pdev: PCI device struct
  10870. * @id: pci device id
  10871. *
  10872. * Return: 0 success, anything else error.
  10873. */
  10874. static int
  10875. _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  10876. {
  10877. struct MPT3SAS_ADAPTER *ioc;
  10878. struct Scsi_Host *shost = NULL;
  10879. int rv;
  10880. u16 hba_mpi_version;
  10881. int iopoll_q_count = 0;
  10882. /* Determine in which MPI version class this pci device belongs */
  10883. hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
  10884. if (hba_mpi_version == 0)
  10885. return -ENODEV;
  10886. /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
  10887. * for other generation HBA's return with -ENODEV
  10888. */
  10889. if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
  10890. return -ENODEV;
  10891. /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
  10892. * for other generation HBA's return with -ENODEV
  10893. */
  10894. if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
  10895. || hba_mpi_version == MPI26_VERSION)))
  10896. return -ENODEV;
  10897. switch (hba_mpi_version) {
  10898. case MPI2_VERSION:
  10899. pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
  10900. PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
  10901. /* Use mpt2sas driver host template for SAS 2.0 HBA's */
  10902. shost = scsi_host_alloc(&mpt2sas_driver_template,
  10903. sizeof(struct MPT3SAS_ADAPTER));
  10904. if (!shost)
  10905. return -ENODEV;
  10906. ioc = shost_priv(shost);
  10907. memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
  10908. ioc->hba_mpi_version_belonged = hba_mpi_version;
  10909. ioc->id = mpt2_ids++;
  10910. sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
  10911. switch (pdev->device) {
  10912. case MPI2_MFGPAGE_DEVID_SSS6200:
  10913. ioc->is_warpdrive = 1;
  10914. ioc->hide_ir_msg = 1;
  10915. break;
  10916. case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
  10917. case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
  10918. ioc->is_mcpu_endpoint = 1;
  10919. break;
  10920. default:
  10921. ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
  10922. break;
  10923. }
  10924. if (multipath_on_hba == -1 || multipath_on_hba == 0)
  10925. ioc->multipath_on_hba = 0;
  10926. else
  10927. ioc->multipath_on_hba = 1;
  10928. break;
  10929. case MPI25_VERSION:
  10930. case MPI26_VERSION:
  10931. /* Use mpt3sas driver host template for SAS 3.0 HBA's */
  10932. shost = scsi_host_alloc(&mpt3sas_driver_template,
  10933. sizeof(struct MPT3SAS_ADAPTER));
  10934. if (!shost)
  10935. return -ENODEV;
  10936. ioc = shost_priv(shost);
  10937. memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
  10938. ioc->hba_mpi_version_belonged = hba_mpi_version;
  10939. ioc->id = mpt3_ids++;
  10940. sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
  10941. switch (pdev->device) {
  10942. case MPI26_MFGPAGE_DEVID_SAS3508:
  10943. case MPI26_MFGPAGE_DEVID_SAS3508_1:
  10944. case MPI26_MFGPAGE_DEVID_SAS3408:
  10945. case MPI26_MFGPAGE_DEVID_SAS3516:
  10946. case MPI26_MFGPAGE_DEVID_SAS3516_1:
  10947. case MPI26_MFGPAGE_DEVID_SAS3416:
  10948. case MPI26_MFGPAGE_DEVID_SAS3616:
  10949. case MPI26_ATLAS_PCIe_SWITCH_DEVID:
  10950. ioc->is_gen35_ioc = 1;
  10951. break;
  10952. case MPI26_MFGPAGE_DEVID_INVALID0_3816:
  10953. case MPI26_MFGPAGE_DEVID_INVALID0_3916:
  10954. dev_err(&pdev->dev,
  10955. "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
  10956. pdev->device, pdev->subsystem_vendor,
  10957. pdev->subsystem_device);
  10958. return 1;
  10959. case MPI26_MFGPAGE_DEVID_INVALID1_3816:
  10960. case MPI26_MFGPAGE_DEVID_INVALID1_3916:
  10961. dev_err(&pdev->dev,
  10962. "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
  10963. pdev->device, pdev->subsystem_vendor,
  10964. pdev->subsystem_device);
  10965. return 1;
  10966. case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
  10967. case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
  10968. dev_info(&pdev->dev,
  10969. "HBA is in Configurable Secure mode\n");
  10970. fallthrough;
  10971. case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
  10972. case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
  10973. ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
  10974. break;
  10975. default:
  10976. ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
  10977. }
  10978. if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
  10979. pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
  10980. (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
  10981. ioc->combined_reply_queue = 1;
  10982. if (ioc->is_gen35_ioc)
  10983. ioc->combined_reply_index_count =
  10984. MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
  10985. else
  10986. ioc->combined_reply_index_count =
  10987. MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
  10988. }
  10989. switch (ioc->is_gen35_ioc) {
  10990. case 0:
  10991. if (multipath_on_hba == -1 || multipath_on_hba == 0)
  10992. ioc->multipath_on_hba = 0;
  10993. else
  10994. ioc->multipath_on_hba = 1;
  10995. break;
  10996. case 1:
  10997. if (multipath_on_hba == -1 || multipath_on_hba > 0)
  10998. ioc->multipath_on_hba = 1;
  10999. else
  11000. ioc->multipath_on_hba = 0;
  11001. break;
  11002. default:
  11003. break;
  11004. }
  11005. break;
  11006. default:
  11007. return -ENODEV;
  11008. }
  11009. INIT_LIST_HEAD(&ioc->list);
  11010. spin_lock(&gioc_lock);
  11011. list_add_tail(&ioc->list, &mpt3sas_ioc_list);
  11012. spin_unlock(&gioc_lock);
  11013. ioc->shost = shost;
  11014. ioc->pdev = pdev;
  11015. ioc->scsi_io_cb_idx = scsi_io_cb_idx;
  11016. ioc->tm_cb_idx = tm_cb_idx;
  11017. ioc->ctl_cb_idx = ctl_cb_idx;
  11018. ioc->base_cb_idx = base_cb_idx;
  11019. ioc->port_enable_cb_idx = port_enable_cb_idx;
  11020. ioc->transport_cb_idx = transport_cb_idx;
  11021. ioc->scsih_cb_idx = scsih_cb_idx;
  11022. ioc->config_cb_idx = config_cb_idx;
  11023. ioc->tm_tr_cb_idx = tm_tr_cb_idx;
  11024. ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
  11025. ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
  11026. ioc->logging_level = logging_level;
  11027. ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
  11028. /* Host waits for minimum of six seconds */
  11029. ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
  11030. /*
  11031. * Enable MEMORY MOVE support flag.
  11032. */
  11033. ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
  11034. /* Enable ADDITIONAL QUERY support flag. */
  11035. ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
  11036. ioc->enable_sdev_max_qd = enable_sdev_max_qd;
  11037. /* misc semaphores and spin locks */
  11038. mutex_init(&ioc->reset_in_progress_mutex);
  11039. /* initializing pci_access_mutex lock */
  11040. mutex_init(&ioc->pci_access_mutex);
  11041. spin_lock_init(&ioc->ioc_reset_in_progress_lock);
  11042. spin_lock_init(&ioc->scsi_lookup_lock);
  11043. spin_lock_init(&ioc->sas_device_lock);
  11044. spin_lock_init(&ioc->sas_node_lock);
  11045. spin_lock_init(&ioc->fw_event_lock);
  11046. spin_lock_init(&ioc->raid_device_lock);
  11047. spin_lock_init(&ioc->pcie_device_lock);
  11048. spin_lock_init(&ioc->diag_trigger_lock);
  11049. INIT_LIST_HEAD(&ioc->sas_device_list);
  11050. INIT_LIST_HEAD(&ioc->sas_device_init_list);
  11051. INIT_LIST_HEAD(&ioc->sas_expander_list);
  11052. INIT_LIST_HEAD(&ioc->enclosure_list);
  11053. INIT_LIST_HEAD(&ioc->pcie_device_list);
  11054. INIT_LIST_HEAD(&ioc->pcie_device_init_list);
  11055. INIT_LIST_HEAD(&ioc->fw_event_list);
  11056. INIT_LIST_HEAD(&ioc->raid_device_list);
  11057. INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
  11058. INIT_LIST_HEAD(&ioc->delayed_tr_list);
  11059. INIT_LIST_HEAD(&ioc->delayed_sc_list);
  11060. INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
  11061. INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
  11062. INIT_LIST_HEAD(&ioc->reply_queue_list);
  11063. INIT_LIST_HEAD(&ioc->port_table_list);
  11064. sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
  11065. /* init shost parameters */
  11066. shost->max_cmd_len = 32;
  11067. shost->max_lun = max_lun;
  11068. shost->transportt = mpt3sas_transport_template;
  11069. shost->unique_id = ioc->id;
  11070. if (ioc->is_mcpu_endpoint) {
  11071. /* mCPU MPI support 64K max IO */
  11072. shost->max_sectors = 128;
  11073. ioc_info(ioc, "The max_sectors value is set to %d\n",
  11074. shost->max_sectors);
  11075. } else {
  11076. if (max_sectors != 0xFFFF) {
  11077. if (max_sectors < 64) {
  11078. shost->max_sectors = 64;
  11079. ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
  11080. max_sectors);
  11081. } else if (max_sectors > 32767) {
  11082. shost->max_sectors = 32767;
  11083. ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
  11084. max_sectors);
  11085. } else {
  11086. shost->max_sectors = max_sectors & 0xFFFE;
  11087. ioc_info(ioc, "The max_sectors value is set to %d\n",
  11088. shost->max_sectors);
  11089. }
  11090. }
  11091. }
  11092. /* register EEDP capabilities with SCSI layer */
  11093. if (prot_mask >= 0)
  11094. scsi_host_set_prot(shost, (prot_mask & 0x07));
  11095. else
  11096. scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
  11097. | SHOST_DIF_TYPE2_PROTECTION
  11098. | SHOST_DIF_TYPE3_PROTECTION);
  11099. scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
  11100. /* event thread */
  11101. snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
  11102. "fw_event_%s%d", ioc->driver_name, ioc->id);
  11103. ioc->firmware_event_thread = alloc_ordered_workqueue(
  11104. ioc->firmware_event_name, 0);
  11105. if (!ioc->firmware_event_thread) {
  11106. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  11107. __FILE__, __LINE__, __func__);
  11108. rv = -ENODEV;
  11109. goto out_thread_fail;
  11110. }
  11111. shost->host_tagset = 0;
  11112. if (ioc->is_gen35_ioc && host_tagset_enable)
  11113. shost->host_tagset = 1;
  11114. ioc->is_driver_loading = 1;
  11115. if ((mpt3sas_base_attach(ioc))) {
  11116. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  11117. __FILE__, __LINE__, __func__);
  11118. rv = -ENODEV;
  11119. goto out_attach_fail;
  11120. }
  11121. if (ioc->is_warpdrive) {
  11122. if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
  11123. ioc->hide_drives = 0;
  11124. else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
  11125. ioc->hide_drives = 1;
  11126. else {
  11127. if (mpt3sas_get_num_volumes(ioc))
  11128. ioc->hide_drives = 1;
  11129. else
  11130. ioc->hide_drives = 0;
  11131. }
  11132. } else
  11133. ioc->hide_drives = 0;
  11134. shost->nr_hw_queues = 1;
  11135. if (shost->host_tagset) {
  11136. shost->nr_hw_queues =
  11137. ioc->reply_queue_count - ioc->high_iops_queues;
  11138. iopoll_q_count =
  11139. ioc->reply_queue_count - ioc->iopoll_q_start_index;
  11140. shost->nr_maps = iopoll_q_count ? 3 : 1;
  11141. dev_info(&ioc->pdev->dev,
  11142. "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
  11143. shost->can_queue, shost->nr_hw_queues);
  11144. }
  11145. rv = scsi_add_host(shost, &pdev->dev);
  11146. if (rv) {
  11147. ioc_err(ioc, "failure at %s:%d/%s()!\n",
  11148. __FILE__, __LINE__, __func__);
  11149. goto out_add_shost_fail;
  11150. }
  11151. scsi_scan_host(shost);
  11152. mpt3sas_setup_debugfs(ioc);
  11153. return 0;
  11154. out_add_shost_fail:
  11155. mpt3sas_base_detach(ioc);
  11156. out_attach_fail:
  11157. destroy_workqueue(ioc->firmware_event_thread);
  11158. out_thread_fail:
  11159. spin_lock(&gioc_lock);
  11160. list_del(&ioc->list);
  11161. spin_unlock(&gioc_lock);
  11162. scsi_host_put(shost);
  11163. return rv;
  11164. }
  11165. /**
  11166. * scsih_suspend - power management suspend main entry point
  11167. * @dev: Device struct
  11168. *
  11169. * Return: 0 success, anything else error.
  11170. */
  11171. static int __maybe_unused
  11172. scsih_suspend(struct device *dev)
  11173. {
  11174. struct pci_dev *pdev = to_pci_dev(dev);
  11175. struct Scsi_Host *shost;
  11176. struct MPT3SAS_ADAPTER *ioc;
  11177. int rc;
  11178. rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
  11179. if (rc)
  11180. return rc;
  11181. mpt3sas_base_stop_watchdog(ioc);
  11182. scsi_block_requests(shost);
  11183. _scsih_nvme_shutdown(ioc);
  11184. ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
  11185. pdev, pci_name(pdev));
  11186. mpt3sas_base_free_resources(ioc);
  11187. return 0;
  11188. }
  11189. /**
  11190. * scsih_resume - power management resume main entry point
  11191. * @dev: Device struct
  11192. *
  11193. * Return: 0 success, anything else error.
  11194. */
  11195. static int __maybe_unused
  11196. scsih_resume(struct device *dev)
  11197. {
  11198. struct pci_dev *pdev = to_pci_dev(dev);
  11199. struct Scsi_Host *shost;
  11200. struct MPT3SAS_ADAPTER *ioc;
  11201. pci_power_t device_state = pdev->current_state;
  11202. int r;
  11203. r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
  11204. if (r)
  11205. return r;
  11206. ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
  11207. pdev, pci_name(pdev), device_state);
  11208. ioc->pdev = pdev;
  11209. r = mpt3sas_base_map_resources(ioc);
  11210. if (r)
  11211. return r;
  11212. ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
  11213. mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
  11214. scsi_unblock_requests(shost);
  11215. mpt3sas_base_start_watchdog(ioc);
  11216. return 0;
  11217. }
  11218. /**
  11219. * scsih_pci_error_detected - Called when a PCI error is detected.
  11220. * @pdev: PCI device struct
  11221. * @state: PCI channel state
  11222. *
  11223. * Description: Called when a PCI error is detected.
  11224. *
  11225. * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
  11226. */
  11227. static pci_ers_result_t
  11228. scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
  11229. {
  11230. struct Scsi_Host *shost;
  11231. struct MPT3SAS_ADAPTER *ioc;
  11232. if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
  11233. return PCI_ERS_RESULT_DISCONNECT;
  11234. ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
  11235. switch (state) {
  11236. case pci_channel_io_normal:
  11237. return PCI_ERS_RESULT_CAN_RECOVER;
  11238. case pci_channel_io_frozen:
  11239. /* Fatal error, prepare for slot reset */
  11240. ioc->pci_error_recovery = 1;
  11241. scsi_block_requests(ioc->shost);
  11242. mpt3sas_base_stop_watchdog(ioc);
  11243. mpt3sas_base_free_resources(ioc);
  11244. return PCI_ERS_RESULT_NEED_RESET;
  11245. case pci_channel_io_perm_failure:
  11246. /* Permanent error, prepare for device removal */
  11247. ioc->pci_error_recovery = 1;
  11248. mpt3sas_base_stop_watchdog(ioc);
  11249. mpt3sas_base_pause_mq_polling(ioc);
  11250. _scsih_flush_running_cmds(ioc);
  11251. return PCI_ERS_RESULT_DISCONNECT;
  11252. }
  11253. return PCI_ERS_RESULT_NEED_RESET;
  11254. }
  11255. /**
  11256. * scsih_pci_slot_reset - Called when PCI slot has been reset.
  11257. * @pdev: PCI device struct
  11258. *
  11259. * Description: This routine is called by the pci error recovery
  11260. * code after the PCI slot has been reset, just before we
  11261. * should resume normal operations.
  11262. */
  11263. static pci_ers_result_t
  11264. scsih_pci_slot_reset(struct pci_dev *pdev)
  11265. {
  11266. struct Scsi_Host *shost;
  11267. struct MPT3SAS_ADAPTER *ioc;
  11268. int rc;
  11269. if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
  11270. return PCI_ERS_RESULT_DISCONNECT;
  11271. ioc_info(ioc, "PCI error: slot reset callback!!\n");
  11272. ioc->pci_error_recovery = 0;
  11273. ioc->pdev = pdev;
  11274. pci_restore_state(pdev);
  11275. rc = mpt3sas_base_map_resources(ioc);
  11276. if (rc)
  11277. return PCI_ERS_RESULT_DISCONNECT;
  11278. ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
  11279. rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  11280. ioc_warn(ioc, "hard reset: %s\n",
  11281. (rc == 0) ? "success" : "failed");
  11282. if (!rc)
  11283. return PCI_ERS_RESULT_RECOVERED;
  11284. else
  11285. return PCI_ERS_RESULT_DISCONNECT;
  11286. }
  11287. /**
  11288. * scsih_pci_resume() - resume normal ops after PCI reset
  11289. * @pdev: pointer to PCI device
  11290. *
  11291. * Called when the error recovery driver tells us that its
  11292. * OK to resume normal operation. Use completion to allow
  11293. * halted scsi ops to resume.
  11294. */
  11295. static void
  11296. scsih_pci_resume(struct pci_dev *pdev)
  11297. {
  11298. struct Scsi_Host *shost;
  11299. struct MPT3SAS_ADAPTER *ioc;
  11300. if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
  11301. return;
  11302. ioc_info(ioc, "PCI error: resume callback!!\n");
  11303. mpt3sas_base_start_watchdog(ioc);
  11304. scsi_unblock_requests(ioc->shost);
  11305. }
  11306. /**
  11307. * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
  11308. * @pdev: pointer to PCI device
  11309. */
  11310. static pci_ers_result_t
  11311. scsih_pci_mmio_enabled(struct pci_dev *pdev)
  11312. {
  11313. struct Scsi_Host *shost;
  11314. struct MPT3SAS_ADAPTER *ioc;
  11315. if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
  11316. return PCI_ERS_RESULT_DISCONNECT;
  11317. ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
  11318. /* TODO - dump whatever for debugging purposes */
  11319. /* This called only if scsih_pci_error_detected returns
  11320. * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
  11321. * works, no need to reset slot.
  11322. */
  11323. return PCI_ERS_RESULT_RECOVERED;
  11324. }
  11325. /**
  11326. * scsih_ncq_prio_supp - Check for NCQ command priority support
  11327. * @sdev: scsi device struct
  11328. *
  11329. * This is called when a user indicates they would like to enable
  11330. * ncq command priorities. This works only on SATA devices.
  11331. */
  11332. bool scsih_ncq_prio_supp(struct scsi_device *sdev)
  11333. {
  11334. struct scsi_vpd *vpd;
  11335. bool ncq_prio_supp = false;
  11336. rcu_read_lock();
  11337. vpd = rcu_dereference(sdev->vpd_pg89);
  11338. if (!vpd || vpd->len < 214)
  11339. goto out;
  11340. ncq_prio_supp = (vpd->data[213] >> 4) & 1;
  11341. out:
  11342. rcu_read_unlock();
  11343. return ncq_prio_supp;
  11344. }
/*
 * The pci device ids are defined in mpi/mpi2_cnfg.h.
 *
 * Each entry matches on vendor/device id only; subvendor and subdevice
 * are wildcarded with PCI_ANY_ID.
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	/*
	 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	/*
	 * ATTO Branded ExpressSAS H12xx GT
	 */
	{ MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	/*
	 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
/* PCI error recovery hooks (AER): detect, optional mmio-enabled probe,
 * slot reset, then resume — implemented by the scsih_pci_* routines above.
 */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected = scsih_pci_error_detected,
	.mmio_enabled = scsih_pci_mmio_enabled,
	.slot_reset = scsih_pci_slot_reset,
	.resume = scsih_pci_resume,
};
/* System suspend/resume callbacks (runtime PM not used here). */
static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

/* PCI driver descriptor registered from _mpt3sas_init(). */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};
/**
 * scsih_init - main entry point for this driver.
 *
 * Registers a completion callback for each internal command type with
 * the base driver and stores the returned index in the corresponding
 * module-level *_cb_idx variable; scsih_exit() releases them in the
 * same set. Also resets the mpt2/mpt3 id counters and sets up debugfs.
 *
 * Return: 0 success, anything else error.  (Currently always 0.)
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* target reset / volume target reset / sas-control completions */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}
/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases every callback index registered in scsih_init(), then the
 * raid class templates (under the same hbas_to_enumerate conditions
 * used when attaching them in _mpt3sas_init()), the SAS transport
 * template, and finally the debugfs entries.
 *
 * Return: 0 success, anything else error.
 */
static void
scsih_exit(void)
{
	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}
  11566. /**
  11567. * _mpt3sas_init - main entry point for this driver.
  11568. *
  11569. * Return: 0 success, anything else error.
  11570. */
  11571. static int __init
  11572. _mpt3sas_init(void)
  11573. {
  11574. int error;
  11575. pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
  11576. MPT3SAS_DRIVER_VERSION);
  11577. mpt3sas_transport_template =
  11578. sas_attach_transport(&mpt3sas_transport_functions);
  11579. if (!mpt3sas_transport_template)
  11580. return -ENODEV;
  11581. /* No need attach mpt3sas raid functions template
  11582. * if hbas_to_enumarate value is one.
  11583. */
  11584. if (hbas_to_enumerate != 1) {
  11585. mpt3sas_raid_template =
  11586. raid_class_attach(&mpt3sas_raid_functions);
  11587. if (!mpt3sas_raid_template) {
  11588. sas_release_transport(mpt3sas_transport_template);
  11589. return -ENODEV;
  11590. }
  11591. }
  11592. /* No need to attach mpt2sas raid functions template
  11593. * if hbas_to_enumarate value is two
  11594. */
  11595. if (hbas_to_enumerate != 2) {
  11596. mpt2sas_raid_template =
  11597. raid_class_attach(&mpt2sas_raid_functions);
  11598. if (!mpt2sas_raid_template) {
  11599. sas_release_transport(mpt3sas_transport_template);
  11600. return -ENODEV;
  11601. }
  11602. }
  11603. error = scsih_init();
  11604. if (error) {
  11605. scsih_exit();
  11606. return error;
  11607. }
  11608. mpt3sas_ctl_init(hbas_to_enumerate);
  11609. error = pci_register_driver(&mpt3sas_driver);
  11610. if (error) {
  11611. mpt3sas_ctl_exit(hbas_to_enumerate);
  11612. scsih_exit();
  11613. }
  11614. return error;
  11615. }
/**
 * _mpt3sas_exit - exit point for this driver (when it is a module).
 *
 */
static void __exit
_mpt3sas_exit(void)
{
	pr_info("mpt3sas version %s unloading\n",
				MPT3SAS_DRIVER_VERSION);

	/* NOTE(review): the ctl (user-space ioctl) interface is torn down
	 * before the PCI driver is unregistered — presumably so no new
	 * user requests can arrive while adapters are being removed;
	 * confirm before reordering these calls.
	 */
	mpt3sas_ctl_exit(hbas_to_enumerate);

	pci_unregister_driver(&mpt3sas_driver);

	/* Releases callback handlers, raid/transport templates, debugfs. */
	scsih_exit();
}
/* Module load/unload entry points. */
module_init(_mpt3sas_init);
module_exit(_mpt3sas_exit);