// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *        by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};
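
/*
 * Supported adapter table. Each entry pairs a PCI vendor/device ID with
 * the chip's characteristics (the remaining initializers appear to select
 * MSI capability, the SIS32 vs. SIS64 interface level, and the
 * IPR_PCI_CFG vs. IPR_MMIO access method) and points at the matching
 * register layout in ipr_chip_cfg[] above; the probe path selects an
 * entry by the adapter's PCI ID.
 */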
static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
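
/* SCSI bus speed limits, presumably indexed by the max_speed module parameter (0 = 80 MB/s, 1 = U160, 2 = U320) */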
static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
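
/*
 * Example module parameter usage (illustrative values only, not
 * recommendations; the names correspond to the module_param_named()
 * declarations above):
 *
 *   modprobe ipr max_speed=2 log_level=4 number_of_msix=8
 *
 * or persistently via a modprobe configuration file such as
 * /etc/modprobe.d/ipr.conf:
 *
 *   options ipr fastfail=1 fast_reboot=1
 */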

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "No ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
  376. {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
  377. "4030: Incorrect multipath connection"},
  378. {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
  379. "4110: Unsupported enclosure function"},
  380. {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
  381. "4120: SAS cable VPD cannot be read"},
  382. {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
  383. "FFF4: Command to logical unit failed"},
  384. {0x05240000, 1, 0,
  385. "Illegal request, invalid request type or request packet"},
  386. {0x05250000, 0, 0,
  387. "Illegal request, invalid resource handle"},
  388. {0x05258000, 0, 0,
  389. "Illegal request, commands not allowed to this device"},
  390. {0x05258100, 0, 0,
  391. "Illegal request, command not allowed to a secondary adapter"},
  392. {0x05258200, 0, 0,
  393. "Illegal request, command not allowed to a non-optimized resource"},
  394. {0x05260000, 0, 0,
  395. "Illegal request, invalid field in parameter list"},
  396. {0x05260100, 0, 0,
  397. "Illegal request, parameter not supported"},
  398. {0x05260200, 0, 0,
  399. "Illegal request, parameter value invalid"},
  400. {0x052C0000, 0, 0,
  401. "Illegal request, command sequence error"},
  402. {0x052C8000, 1, 0,
  403. "Illegal request, dual adapter support not enabled"},
  404. {0x052C8100, 1, 0,
  405. "Illegal request, another cable connector was physically disabled"},
  406. {0x054E8000, 1, 0,
  407. "Illegal request, inconsistent group id/group count"},
  408. {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
  409. "9031: Array protection temporarily suspended, protection resuming"},
  410. {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
  411. "9040: Array protection temporarily suspended, protection resuming"},
  412. {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
  413. "4080: IOA exceeded maximum operating temperature"},
  414. {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
  415. "4085: Service required"},
  416. {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
  417. "4086: SAS Adapter Hardware Configuration Error"},
  418. {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
  419. "3140: Device bus not ready to ready transition"},
  420. {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
  421. "FFFB: SCSI bus was reset"},
  422. {0x06290500, 0, 0,
  423. "FFFE: SCSI bus transition to single ended"},
  424. {0x06290600, 0, 0,
  425. "FFFE: SCSI bus transition to LVD"},
  426. {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
  427. "FFFB: SCSI bus was reset by another initiator"},
  428. {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
  429. "3029: A device replacement has occurred"},
  430. {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
  431. "4102: Device bus fabric performance degradation"},
  432. {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
  433. "9051: IOA cache data exists for a missing or failed device"},
  434. {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
  435. "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
  436. {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
  437. "9025: Disk unit is not supported at its physical location"},
  438. {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
  439. "3020: IOA detected a SCSI bus configuration error"},
  440. {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
  441. "3150: SCSI bus configuration error"},
  442. {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
  443. "9074: Asymmetric advanced function disk configuration"},
  444. {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
  445. "4040: Incomplete multipath connection between IOA and enclosure"},
  446. {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
  447. "4041: Incomplete multipath connection between enclosure and device"},
  448. {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
  449. "9075: Incomplete multipath connection between IOA and remote IOA"},
  450. {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
  451. "9076: Configuration error, missing remote IOA"},
  452. {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
  453. "4050: Enclosure does not support a required multipath function"},
  454. {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
  455. "4121: Configuration error, required cable is missing"},
  456. {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
  457. "4122: Cable is not plugged into the correct location on remote IOA"},
  458. {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
  459. "4123: Configuration error, invalid cable vital product data"},
  460. {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
  461. "4124: Configuration error, both cable ends are plugged into the same IOA"},
  462. {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
  463. "4070: Logically bad block written on device"},
  464. {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
  465. "9041: Array protection temporarily suspended"},
  466. {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
  467. "9042: Corrupt array parity detected on specified device"},
  468. {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
  469. "9030: Array no longer protected due to missing or failed disk unit"},
  470. {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
  471. "9071: Link operational transition"},
  472. {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
  473. "9072: Link not operational transition"},
  474. {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
  475. "9032: Array exposed but still protected"},
  476. {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
  477. "70DD: Device forced failed by disrupt device command"},
  478. {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
  479. "4061: Multipath redundancy level got better"},
  480. {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
  481. "4060: Multipath redundancy level got worse"},
  482. {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
  483. "9083: Device raw mode enabled"},
  484. {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
  485. "9084: Device raw mode disabled"},
  486. {0x07270000, 0, 0,
  487. "Failure due to other device"},
  488. {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
  489. "9008: IOA does not support functions expected by devices"},
  490. {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
  491. "9010: Cache data associated with attached devices cannot be found"},
  492. {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
  493. "9011: Cache data belongs to devices other than those attached"},
  494. {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
  495. "9020: Array missing 2 or more devices with only 1 device present"},
  496. {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
  497. "9021: Array missing 2 or more devices with 2 or more devices present"},
  498. {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
  499. "9022: Exposed array is missing a required device"},
  500. {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
  501. "9023: Array member(s) not at required physical locations"},
  502. {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
  503. "9024: Array not functional due to present hardware configuration"},
  504. {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
  505. "9026: Array not functional due to present hardware configuration"},
  506. {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
  507. "9027: Array is missing a device and parity is out of sync"},
  508. {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
  509. "9028: Maximum number of arrays already exist"},
  510. {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
  511. "9050: Required cache data cannot be located for a disk unit"},
  512. {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
  513. "9052: Cache data exists for a device that has been modified"},
  514. {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
  515. "9054: IOA resources not available due to previous problems"},
  516. {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
  517. "9092: Disk unit requires initialization before use"},
  518. {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
  519. "9029: Incorrect hardware configuration change has been detected"},
  520. {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
  521. "9060: One or more disk pairs are missing from an array"},
  522. {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
  523. "9061: One or more disks are missing from an array"},
  524. {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
  525. "9062: One or more disks are missing from an array"},
  526. {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
  527. "9063: Maximum number of functional arrays has been exceeded"},
  528. {0x07279A00, 0, 0,
  529. "Data protect, other volume set problem"},
  530. {0x0B260000, 0, 0,
  531. "Aborted command, invalid descriptor"},
  532. {0x0B3F9000, 0, 0,
  533. "Target operating conditions have changed, dual adapter takeover"},
  534. {0x0B530200, 0, 0,
  535. "Aborted command, medium removal prevented"},
  536. {0x0B5A0000, 0, 0,
  537. "Command terminated by host"},
  538. {0x0B5B8000, 0, 0,
  539. "Aborted command, command terminated by host"}
  540. };
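/*
* Each ipr_error_table entry pairs an IOASC value with a human readable
* description (struct ipr_error_table_t). The second field appears to
* control whether the raw IOASA is dumped for the error, and the third is
* the minimum driver log level at which the corresponding HCAM entry is
* reported. The table is searched linearly, so entries need not be in
* strict numeric order.
*/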
  541. static const struct ipr_ses_table_entry ipr_ses_table[] = {
  542. { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
  543. { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
  544. { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
  545. { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
  546. { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
  547. { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
  548. { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
  549. { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
  550. { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
  551. { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
  552. { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
  553. { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
  554. { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
  555. };
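/*
* Known SES (SCSI Enclosure Services) backplanes. The first column is the
* 16-byte product ID reported by the enclosure, the second is a compare
* vector ('X' positions must match byte-for-byte, other positions are
* ignored, as interpreted from the matching loop in ipr_find_ses_entry()),
* and the last column is evidently the bus speed limit (80 or 160, i.e.
* Ultra2/Ultra160 class rates) to enforce when that enclosure is attached.
*/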
  556. /*
  557. * Function Prototypes
  558. */
  559. static int ipr_reset_alert(struct ipr_cmnd *);
  560. static void ipr_process_ccn(struct ipr_cmnd *);
  561. static void ipr_process_error(struct ipr_cmnd *);
  562. static void ipr_reset_ioa_job(struct ipr_cmnd *);
  563. static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
  564. enum ipr_shutdown_type);
  565. #ifdef CONFIG_SCSI_IPR_TRACE
  566. /**
  567. * ipr_trc_hook - Add a trace entry to the driver trace
  568. * @ipr_cmd: ipr command struct
  569. * @type: trace type
  570. * @add_data: additional data
  571. *
  572. * Return value:
  573. * none
  574. **/
  575. static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
  576. u8 type, u32 add_data)
  577. {
  578. struct ipr_trace_entry *trace_entry;
  579. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  580. unsigned int trace_index;
  581. trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
  582. trace_entry = &ioa_cfg->trace[trace_index];
  583. trace_entry->time = jiffies;
  584. trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
  585. trace_entry->type = type;
  586. if (ipr_cmd->ioa_cfg->sis64)
  587. trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
  588. else
  589. trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
  590. trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
  591. trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
  592. trace_entry->u.add_data = add_data;
  593. wmb();
  594. }
  595. #else
  596. #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
  597. #endif
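/*
* The driver trace is a fixed-size circular buffer: trace_index is bumped
* atomically and masked with IPR_TRACE_INDEX_MASK, so once the buffer fills
* new entries silently overwrite the oldest ones. When
* CONFIG_SCSI_IPR_TRACE is not set, the hook compiles away to nothing.
*/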
  598. /**
  599. * ipr_lock_and_done - Acquire lock and complete command
  600. * @ipr_cmd: ipr command struct
  601. *
  602. * Return value:
  603. * none
  604. **/
  605. static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
  606. {
  607. unsigned long lock_flags;
  608. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  609. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  610. ipr_cmd->done(ipr_cmd);
  611. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  612. }
  613. /**
  614. * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
  615. * @ipr_cmd: ipr command struct
  616. *
  617. * Return value:
  618. * none
  619. **/
  620. static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  621. {
  622. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  623. struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
  624. struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
  625. dma_addr_t dma_addr = ipr_cmd->dma_addr;
  626. int hrrq_id;
  627. hrrq_id = ioarcb->cmd_pkt.hrrq_id;
  628. memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
  629. ioarcb->cmd_pkt.hrrq_id = hrrq_id;
  630. ioarcb->data_transfer_length = 0;
  631. ioarcb->read_data_transfer_length = 0;
  632. ioarcb->ioadl_len = 0;
  633. ioarcb->read_ioadl_len = 0;
  634. if (ipr_cmd->ioa_cfg->sis64) {
  635. ioarcb->u.sis64_addr_data.data_ioadl_addr =
  636. cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
  637. ioasa64->u.gata.status = 0;
  638. } else {
  639. ioarcb->write_ioadl_addr =
  640. cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
  641. ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
  642. ioasa->u.gata.status = 0;
  643. }
  644. ioasa->hdr.ioasc = 0;
  645. ioasa->hdr.residual_data_len = 0;
  646. ipr_cmd->scsi_cmd = NULL;
  647. ipr_cmd->qc = NULL;
  648. ipr_cmd->sense_buffer[0] = 0;
  649. ipr_cmd->dma_use_sg = 0;
  650. }
  651. /**
  652. * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
  653. * @ipr_cmd: ipr command struct
  654. * @fast_done: fast done function call-back
  655. *
  656. * Return value:
  657. * none
  658. **/
  659. static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
  660. void (*fast_done) (struct ipr_cmnd *))
  661. {
  662. ipr_reinit_ipr_cmnd(ipr_cmd);
  663. ipr_cmd->u.scratch = 0;
  664. ipr_cmd->sibling = NULL;
  665. ipr_cmd->eh_comp = NULL;
  666. ipr_cmd->fast_done = fast_done;
  667. timer_setup(&ipr_cmd->timer, NULL, 0);
  668. }
  669. /**
  670. * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  671. * @hrrq: hrr queue
  672. *
  673. * Return value:
  674. * pointer to ipr command struct
  675. **/
  676. static
  677. struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
  678. {
  679. struct ipr_cmnd *ipr_cmd = NULL;
  680. if (likely(!list_empty(&hrrq->hrrq_free_q))) {
  681. ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
  682. struct ipr_cmnd, queue);
  683. list_del(&ipr_cmd->queue);
  684. }
  685. return ipr_cmd;
  686. }
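/*
* Note: __ipr_get_free_ipr_cmnd() returns NULL when hrrq_free_q is empty and
* does no locking of its own; callers are expected to hold whichever lock
* protects the queue (the hrrq _lock or the host lock, depending on the
* call site).
*/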
  687. /**
  688. * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
  689. * @ioa_cfg: ioa config struct
  690. *
  691. * Return value:
  692. * pointer to ipr command struct
  693. **/
  694. static
  695. struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
  696. {
  697. struct ipr_cmnd *ipr_cmd =
  698. __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
  699. ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
  700. return ipr_cmd;
  701. }
  702. /**
  703. * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  704. * @ioa_cfg: ioa config struct
  705. * @clr_ints: interrupts to clear
  706. *
  707. * This function masks all interrupts on the adapter, then clears the
  708. * interrupts specified in the mask
  709. *
  710. * Return value:
  711. * none
  712. **/
  713. static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
  714. u32 clr_ints)
  715. {
  716. int i;
  717. /* Stop new interrupts */
  718. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  719. spin_lock(&ioa_cfg->hrrq[i]._lock);
  720. ioa_cfg->hrrq[i].allow_interrupts = 0;
  721. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  722. }
  723. /* Set interrupt mask to stop all new interrupts */
  724. if (ioa_cfg->sis64)
  725. writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
  726. else
  727. writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
  728. /* Clear any pending interrupts */
  729. if (ioa_cfg->sis64)
  730. writel(~0, ioa_cfg->regs.clr_interrupt_reg);
  731. writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
  732. readl(ioa_cfg->regs.sense_interrupt_reg);
  733. }
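/*
* On SIS-64 adapters the interrupt mask register is 64 bits wide, hence the
* writeq() above; older adapters use 32-bit registers throughout. The final
* readl() of the sense interrupt register is presumably there to flush the
* posted MMIO writes before the caller proceeds.
*/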
  734. /**
  735. * ipr_save_pcix_cmd_reg - Save PCI-X command register
  736. * @ioa_cfg: ioa config struct
  737. *
  738. * Return value:
  739. * 0 on success / -EIO on failure
  740. **/
  741. static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
  742. {
  743. int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
  744. if (pcix_cmd_reg == 0)
  745. return 0;
  746. if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
  747. &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
  748. dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
  749. return -EIO;
  750. }
  751. ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
  752. return 0;
  753. }
  754. /**
  755. * ipr_set_pcix_cmd_reg - Setup PCI-X command register
  756. * @ioa_cfg: ioa config struct
  757. *
  758. * Return value:
  759. * 0 on success / -EIO on failure
  760. **/
  761. static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
  762. {
  763. int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
  764. if (pcix_cmd_reg) {
  765. if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
  766. ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
  767. dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
  768. return -EIO;
  769. }
  770. }
  771. return 0;
  772. }
  773. /**
  774. * __ipr_sata_eh_done - done function for aborted SATA commands
  775. * @ipr_cmd: ipr command struct
  776. *
  777. * This function is invoked for ops generated to SATA
  778. * devices which are being aborted.
  779. *
  780. * Return value:
  781. * none
  782. **/
  783. static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
  784. {
  785. struct ata_queued_cmd *qc = ipr_cmd->qc;
  786. struct ipr_sata_port *sata_port = qc->ap->private_data;
  787. qc->err_mask |= AC_ERR_OTHER;
  788. sata_port->ioasa.status |= ATA_BUSY;
  789. ata_qc_complete(qc);
  790. if (ipr_cmd->eh_comp)
  791. complete(ipr_cmd->eh_comp);
  792. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  793. }
  794. /**
  795. * ipr_sata_eh_done - done function for aborted SATA commands
  796. * @ipr_cmd: ipr command struct
  797. *
  798. * This function is invoked for ops generated to SATA
  799. * devices which are being aborted.
  800. *
  801. * Return value:
  802. * none
  803. **/
  804. static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
  805. {
  806. struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
  807. unsigned long hrrq_flags;
  808. spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
  809. __ipr_sata_eh_done(ipr_cmd);
  810. spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
  811. }
  812. /**
  813. * __ipr_scsi_eh_done - mid-layer done function for aborted ops
  814. * @ipr_cmd: ipr command struct
  815. *
  816. * This function is invoked by the interrupt handler for
  817. * ops generated by the SCSI mid-layer which are being aborted.
  818. *
  819. * Return value:
  820. * none
  821. **/
  822. static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
  823. {
  824. struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
  825. scsi_cmd->result |= (DID_ERROR << 16);
  826. scsi_dma_unmap(ipr_cmd->scsi_cmd);
  827. scsi_done(scsi_cmd);
  828. if (ipr_cmd->eh_comp)
  829. complete(ipr_cmd->eh_comp);
  830. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  831. }
  832. /**
  833. * ipr_scsi_eh_done - mid-layer done function for aborted ops
  834. * @ipr_cmd: ipr command struct
  835. *
  836. * This function is invoked by the interrupt handler for
  837. * ops generated by the SCSI mid-layer which are being aborted.
  838. *
  839. * Return value:
  840. * none
  841. **/
  842. static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
  843. {
  844. unsigned long hrrq_flags;
  845. struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
  846. spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
  847. __ipr_scsi_eh_done(ipr_cmd);
  848. spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
  849. }
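/*
* The __ipr_*_eh_done() variants above assume the caller already holds the
* owning hrrq->_lock (e.g. ipr_fail_all_ops() below); the un-prefixed
* wrappers take and release that lock themselves.
*/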
  850. /**
  851. * ipr_fail_all_ops - Fails all outstanding ops.
  852. * @ioa_cfg: ioa config struct
  853. *
  854. * This function fails all outstanding ops.
  855. *
  856. * Return value:
  857. * none
  858. **/
  859. static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
  860. {
  861. struct ipr_cmnd *ipr_cmd, *temp;
  862. struct ipr_hrr_queue *hrrq;
  863. ENTER;
  864. for_each_hrrq(hrrq, ioa_cfg) {
  865. spin_lock(&hrrq->_lock);
  866. list_for_each_entry_safe(ipr_cmd,
  867. temp, &hrrq->hrrq_pending_q, queue) {
  868. list_del(&ipr_cmd->queue);
  869. ipr_cmd->s.ioasa.hdr.ioasc =
  870. cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
  871. ipr_cmd->s.ioasa.hdr.ilid =
  872. cpu_to_be32(IPR_DRIVER_ILID);
  873. if (ipr_cmd->scsi_cmd)
  874. ipr_cmd->done = __ipr_scsi_eh_done;
  875. else if (ipr_cmd->qc)
  876. ipr_cmd->done = __ipr_sata_eh_done;
  877. ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
  878. IPR_IOASC_IOA_WAS_RESET);
  879. del_timer(&ipr_cmd->timer);
  880. ipr_cmd->done(ipr_cmd);
  881. }
  882. spin_unlock(&hrrq->_lock);
  883. }
  884. LEAVE;
  885. }
  886. /**
  887. * ipr_send_command - Send driver initiated requests.
  888. * @ipr_cmd: ipr command struct
  889. *
  890. * This function sends a command to the adapter using the correct write call.
891. * In the case of sis64, calculate the ioarcb size required, then OR the
892. * appropriate format bits into the IOARCB bus address.
  893. *
  894. * Return value:
  895. * none
  896. **/
  897. static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
  898. {
  899. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  900. dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
  901. if (ioa_cfg->sis64) {
  902. /* The default size is 256 bytes */
  903. send_dma_addr |= 0x1;
  904. /* If the number of ioadls * size of ioadl > 128 bytes,
  905. then use a 512 byte ioarcb */
  906. if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
  907. send_dma_addr |= 0x4;
  908. writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
  909. } else
  910. writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
  911. }
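/*
* SIS-64 IOARCB size selection (illustrative): assuming the 16-byte
* descriptor implied by struct ipr_ioadl64_desc (flags, length and a 64-bit
* address), 8 descriptors already consume 128 bytes, so a command with more
* than 8 scatter/gather entries no longer fits the default 256-byte IOARCB
* and bit 2 of the bus address is set to request the 512-byte format.
*/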
  912. /**
  913. * ipr_do_req - Send driver initiated requests.
  914. * @ipr_cmd: ipr command struct
  915. * @done: done function
  916. * @timeout_func: timeout function
  917. * @timeout: timeout value
  918. *
  919. * This function sends the specified command to the adapter with the
  920. * timeout given. The done function is invoked on command completion.
  921. *
  922. * Return value:
  923. * none
  924. **/
  925. static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
  926. void (*done) (struct ipr_cmnd *),
  927. void (*timeout_func) (struct timer_list *), u32 timeout)
  928. {
  929. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
  930. ipr_cmd->done = done;
  931. ipr_cmd->timer.expires = jiffies + timeout;
  932. ipr_cmd->timer.function = timeout_func;
  933. add_timer(&ipr_cmd->timer);
  934. ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
  935. ipr_send_command(ipr_cmd);
  936. }
  937. /**
  938. * ipr_internal_cmd_done - Op done function for an internally generated op.
  939. * @ipr_cmd: ipr command struct
  940. *
  941. * This function is the op done function for an internally generated,
  942. * blocking op. It simply wakes the sleeping thread.
  943. *
  944. * Return value:
  945. * none
  946. **/
  947. static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
  948. {
  949. if (ipr_cmd->sibling)
  950. ipr_cmd->sibling = NULL;
  951. else
  952. complete(&ipr_cmd->completion);
  953. }
  954. /**
  955. * ipr_init_ioadl - initialize the ioadl for the correct SIS type
  956. * @ipr_cmd: ipr command struct
  957. * @dma_addr: dma address
  958. * @len: transfer length
  959. * @flags: ioadl flag value
  960. *
  961. * This function initializes an ioadl in the case where there is only a single
  962. * descriptor.
  963. *
  964. * Return value:
  965. * nothing
  966. **/
  967. static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
  968. u32 len, int flags)
  969. {
  970. struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
  971. struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
  972. ipr_cmd->dma_use_sg = 1;
  973. if (ipr_cmd->ioa_cfg->sis64) {
  974. ioadl64->flags = cpu_to_be32(flags);
  975. ioadl64->data_len = cpu_to_be32(len);
  976. ioadl64->address = cpu_to_be64(dma_addr);
  977. ipr_cmd->ioarcb.ioadl_len =
  978. cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
  979. ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
  980. } else {
  981. ioadl->flags_and_data_len = cpu_to_be32(flags | len);
  982. ioadl->address = cpu_to_be32(dma_addr);
  983. if (flags == IPR_IOADL_FLAGS_READ_LAST) {
  984. ipr_cmd->ioarcb.read_ioadl_len =
  985. cpu_to_be32(sizeof(struct ipr_ioadl_desc));
  986. ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
  987. } else {
  988. ipr_cmd->ioarcb.ioadl_len =
  989. cpu_to_be32(sizeof(struct ipr_ioadl_desc));
  990. ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
  991. }
  992. }
  993. }
  994. /**
  995. * ipr_send_blocking_cmd - Send command and sleep on its completion.
  996. * @ipr_cmd: ipr command struct
  997. * @timeout_func: function to invoke if command times out
  998. * @timeout: timeout
  999. *
  1000. * Return value:
  1001. * none
  1002. **/
  1003. static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
  1004. void (*timeout_func) (struct timer_list *),
  1005. u32 timeout)
  1006. {
  1007. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  1008. init_completion(&ipr_cmd->completion);
  1009. ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
  1010. spin_unlock_irq(ioa_cfg->host->host_lock);
  1011. wait_for_completion(&ipr_cmd->completion);
  1012. spin_lock_irq(ioa_cfg->host->host_lock);
  1013. }
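/*
* ipr_send_blocking_cmd() must be entered with the host lock held: it drops
* the lock around wait_for_completion() and re-acquires it before returning,
* so the caller's locking state is unchanged even though the command sleeps.
*/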
  1014. static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
  1015. {
  1016. unsigned int hrrq;
  1017. if (ioa_cfg->hrrq_num == 1)
  1018. hrrq = 0;
  1019. else {
  1020. hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
  1021. hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
  1022. }
  1023. return hrrq;
  1024. }
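/*
* Index 0 (IPR_INIT_HRRQ) is never returned here; it is used above for
* internally generated commands. When more than one HRRQ exists, I/O is
* round-robined across queues 1..hrrq_num-1, e.g. with hrrq_num == 4 the
* returned index cycles over queues 1, 2 and 3 only.
*/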
  1025. /**
  1026. * ipr_send_hcam - Send an HCAM to the adapter.
  1027. * @ioa_cfg: ioa config struct
  1028. * @type: HCAM type
  1029. * @hostrcb: hostrcb struct
  1030. *
  1031. * This function will send a Host Controlled Async command to the adapter.
  1032. * If HCAMs are currently not allowed to be issued to the adapter, it will
  1033. * place the hostrcb on the free queue.
  1034. *
  1035. * Return value:
  1036. * none
  1037. **/
  1038. static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
  1039. struct ipr_hostrcb *hostrcb)
  1040. {
  1041. struct ipr_cmnd *ipr_cmd;
  1042. struct ipr_ioarcb *ioarcb;
  1043. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
  1044. ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
  1045. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
  1046. list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
  1047. ipr_cmd->u.hostrcb = hostrcb;
  1048. ioarcb = &ipr_cmd->ioarcb;
  1049. ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  1050. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
  1051. ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
  1052. ioarcb->cmd_pkt.cdb[1] = type;
  1053. ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
  1054. ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
  1055. ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
  1056. sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
  1057. if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
  1058. ipr_cmd->done = ipr_process_ccn;
  1059. else
  1060. ipr_cmd->done = ipr_process_error;
  1061. ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
  1062. ipr_send_command(ipr_cmd);
  1063. } else {
  1064. list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
  1065. }
  1066. }
  1067. /**
  1068. * ipr_update_ata_class - Update the ata class in the resource entry
  1069. * @res: resource entry struct
  1070. * @proto: cfgte device bus protocol value
  1071. *
  1072. * Return value:
  1073. * none
  1074. **/
  1075. static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
  1076. {
  1077. switch (proto) {
  1078. case IPR_PROTO_SATA:
  1079. case IPR_PROTO_SAS_STP:
  1080. res->ata_class = ATA_DEV_ATA;
  1081. break;
  1082. case IPR_PROTO_SATA_ATAPI:
  1083. case IPR_PROTO_SAS_STP_ATAPI:
  1084. res->ata_class = ATA_DEV_ATAPI;
  1085. break;
  1086. default:
  1087. res->ata_class = ATA_DEV_UNKNOWN;
  1088. break;
  1089. }
  1090. }
  1091. /**
  1092. * ipr_init_res_entry - Initialize a resource entry struct.
  1093. * @res: resource entry struct
  1094. * @cfgtew: config table entry wrapper struct
  1095. *
  1096. * Return value:
  1097. * none
  1098. **/
  1099. static void ipr_init_res_entry(struct ipr_resource_entry *res,
  1100. struct ipr_config_table_entry_wrapper *cfgtew)
  1101. {
  1102. int found = 0;
  1103. unsigned int proto;
  1104. struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
  1105. struct ipr_resource_entry *gscsi_res = NULL;
  1106. res->needs_sync_complete = 0;
  1107. res->in_erp = 0;
  1108. res->add_to_ml = 0;
  1109. res->del_from_ml = 0;
  1110. res->resetting_device = 0;
  1111. res->reset_occurred = 0;
  1112. res->sdev = NULL;
  1113. res->sata_port = NULL;
  1114. if (ioa_cfg->sis64) {
  1115. proto = cfgtew->u.cfgte64->proto;
  1116. res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
  1117. res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
  1118. res->qmodel = IPR_QUEUEING_MODEL64(res);
  1119. res->type = cfgtew->u.cfgte64->res_type;
  1120. memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
  1121. sizeof(res->res_path));
  1122. res->bus = 0;
  1123. memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
  1124. sizeof(res->dev_lun.scsi_lun));
  1125. res->lun = scsilun_to_int(&res->dev_lun);
  1126. if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
  1127. list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
  1128. if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
  1129. found = 1;
  1130. res->target = gscsi_res->target;
  1131. break;
  1132. }
  1133. }
  1134. if (!found) {
  1135. res->target = find_first_zero_bit(ioa_cfg->target_ids,
  1136. ioa_cfg->max_devs_supported);
  1137. set_bit(res->target, ioa_cfg->target_ids);
  1138. }
  1139. } else if (res->type == IPR_RES_TYPE_IOAFP) {
  1140. res->bus = IPR_IOAFP_VIRTUAL_BUS;
  1141. res->target = 0;
  1142. } else if (res->type == IPR_RES_TYPE_ARRAY) {
  1143. res->bus = IPR_ARRAY_VIRTUAL_BUS;
  1144. res->target = find_first_zero_bit(ioa_cfg->array_ids,
  1145. ioa_cfg->max_devs_supported);
  1146. set_bit(res->target, ioa_cfg->array_ids);
  1147. } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
  1148. res->bus = IPR_VSET_VIRTUAL_BUS;
  1149. res->target = find_first_zero_bit(ioa_cfg->vset_ids,
  1150. ioa_cfg->max_devs_supported);
  1151. set_bit(res->target, ioa_cfg->vset_ids);
  1152. } else {
  1153. res->target = find_first_zero_bit(ioa_cfg->target_ids,
  1154. ioa_cfg->max_devs_supported);
  1155. set_bit(res->target, ioa_cfg->target_ids);
  1156. }
  1157. } else {
  1158. proto = cfgtew->u.cfgte->proto;
  1159. res->qmodel = IPR_QUEUEING_MODEL(res);
  1160. res->flags = cfgtew->u.cfgte->flags;
  1161. if (res->flags & IPR_IS_IOA_RESOURCE)
  1162. res->type = IPR_RES_TYPE_IOAFP;
  1163. else
  1164. res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
  1165. res->bus = cfgtew->u.cfgte->res_addr.bus;
  1166. res->target = cfgtew->u.cfgte->res_addr.target;
  1167. res->lun = cfgtew->u.cfgte->res_addr.lun;
  1168. res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
  1169. }
  1170. ipr_update_ata_class(res, proto);
  1171. }
  1172. /**
  1173. * ipr_is_same_device - Determine if two devices are the same.
  1174. * @res: resource entry struct
  1175. * @cfgtew: config table entry wrapper struct
  1176. *
  1177. * Return value:
  1178. * 1 if the devices are the same / 0 otherwise
  1179. **/
  1180. static int ipr_is_same_device(struct ipr_resource_entry *res,
  1181. struct ipr_config_table_entry_wrapper *cfgtew)
  1182. {
  1183. if (res->ioa_cfg->sis64) {
  1184. if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
  1185. sizeof(cfgtew->u.cfgte64->dev_id)) &&
  1186. !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
  1187. sizeof(cfgtew->u.cfgte64->lun))) {
  1188. return 1;
  1189. }
  1190. } else {
  1191. if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
  1192. res->target == cfgtew->u.cfgte->res_addr.target &&
  1193. res->lun == cfgtew->u.cfgte->res_addr.lun)
  1194. return 1;
  1195. }
  1196. return 0;
  1197. }
  1198. /**
  1199. * __ipr_format_res_path - Format the resource path for printing.
  1200. * @res_path: resource path
  1201. * @buffer: buffer
  1202. * @len: length of buffer provided
  1203. *
  1204. * Return value:
  1205. * pointer to buffer
  1206. **/
  1207. static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
  1208. {
  1209. int i;
  1210. char *p = buffer;
  1211. *p = '\0';
  1212. p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1213. for (i = 1; i < IPR_RES_PATH_BYTES && res_path[i] != 0xff; i++)
  1214. p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
  1215. return buffer;
  1216. }
  1217. /**
  1218. * ipr_format_res_path - Format the resource path for printing.
  1219. * @ioa_cfg: ioa config struct
  1220. * @res_path: resource path
  1221. * @buffer: buffer
  1222. * @len: length of buffer provided
  1223. *
  1224. * Return value:
  1225. * pointer to buffer
  1226. **/
  1227. static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
  1228. u8 *res_path, char *buffer, int len)
  1229. {
  1230. char *p = buffer;
  1231. *p = '\0';
  1232. p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
  1233. __ipr_format_res_path(res_path, p, len - (p - buffer));
  1234. return buffer;
  1235. }
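/*
* Example (illustrative): for a resource path of { 0x00, 0x0A, 0xFF, ... }
* __ipr_format_res_path() yields "00-0A", and ipr_format_res_path() prefixes
* the SCSI host number, e.g. "2/00-0A". A 0xFF byte terminates the path.
*/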
  1236. /**
  1237. * ipr_update_res_entry - Update the resource entry.
  1238. * @res: resource entry struct
  1239. * @cfgtew: config table entry wrapper struct
  1240. *
  1241. * Return value:
  1242. * none
  1243. **/
  1244. static void ipr_update_res_entry(struct ipr_resource_entry *res,
  1245. struct ipr_config_table_entry_wrapper *cfgtew)
  1246. {
  1247. char buffer[IPR_MAX_RES_PATH_LENGTH];
  1248. unsigned int proto;
  1249. int new_path = 0;
  1250. if (res->ioa_cfg->sis64) {
  1251. res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
  1252. res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
  1253. res->type = cfgtew->u.cfgte64->res_type;
  1254. memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
  1255. sizeof(struct ipr_std_inq_data));
  1256. res->qmodel = IPR_QUEUEING_MODEL64(res);
  1257. proto = cfgtew->u.cfgte64->proto;
  1258. res->res_handle = cfgtew->u.cfgte64->res_handle;
  1259. res->dev_id = cfgtew->u.cfgte64->dev_id;
  1260. memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
  1261. sizeof(res->dev_lun.scsi_lun));
  1262. if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
  1263. sizeof(res->res_path))) {
  1264. memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
  1265. sizeof(res->res_path));
  1266. new_path = 1;
  1267. }
  1268. if (res->sdev && new_path)
  1269. sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
  1270. ipr_format_res_path(res->ioa_cfg,
  1271. res->res_path, buffer, sizeof(buffer)));
  1272. } else {
  1273. res->flags = cfgtew->u.cfgte->flags;
  1274. if (res->flags & IPR_IS_IOA_RESOURCE)
  1275. res->type = IPR_RES_TYPE_IOAFP;
  1276. else
  1277. res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
  1278. memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
  1279. sizeof(struct ipr_std_inq_data));
  1280. res->qmodel = IPR_QUEUEING_MODEL(res);
  1281. proto = cfgtew->u.cfgte->proto;
  1282. res->res_handle = cfgtew->u.cfgte->res_handle;
  1283. }
  1284. ipr_update_ata_class(res, proto);
  1285. }
  1286. /**
  1287. * ipr_clear_res_target - Clear the bit in the bit map representing the target
  1288. * for the resource.
  1289. * @res: resource entry struct
  1290. *
  1291. * Return value:
  1292. * none
  1293. **/
  1294. static void ipr_clear_res_target(struct ipr_resource_entry *res)
  1295. {
  1296. struct ipr_resource_entry *gscsi_res = NULL;
  1297. struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
  1298. if (!ioa_cfg->sis64)
  1299. return;
  1300. if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
  1301. clear_bit(res->target, ioa_cfg->array_ids);
  1302. else if (res->bus == IPR_VSET_VIRTUAL_BUS)
  1303. clear_bit(res->target, ioa_cfg->vset_ids);
  1304. else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
  1305. list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
  1306. if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
  1307. return;
  1308. clear_bit(res->target, ioa_cfg->target_ids);
  1309. } else if (res->bus == 0)
  1310. clear_bit(res->target, ioa_cfg->target_ids);
  1311. }
  1312. /**
  1313. * ipr_handle_config_change - Handle a config change from the adapter
  1314. * @ioa_cfg: ioa config struct
  1315. * @hostrcb: hostrcb
  1316. *
  1317. * Return value:
  1318. * none
  1319. **/
  1320. static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
  1321. struct ipr_hostrcb *hostrcb)
  1322. {
  1323. struct ipr_resource_entry *res = NULL;
  1324. struct ipr_config_table_entry_wrapper cfgtew;
  1325. __be32 cc_res_handle;
  1326. u32 is_ndn = 1;
  1327. if (ioa_cfg->sis64) {
  1328. cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
  1329. cc_res_handle = cfgtew.u.cfgte64->res_handle;
  1330. } else {
  1331. cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
  1332. cc_res_handle = cfgtew.u.cfgte->res_handle;
  1333. }
  1334. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  1335. if (res->res_handle == cc_res_handle) {
  1336. is_ndn = 0;
  1337. break;
  1338. }
  1339. }
  1340. if (is_ndn) {
  1341. if (list_empty(&ioa_cfg->free_res_q)) {
  1342. ipr_send_hcam(ioa_cfg,
  1343. IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
  1344. hostrcb);
  1345. return;
  1346. }
  1347. res = list_entry(ioa_cfg->free_res_q.next,
  1348. struct ipr_resource_entry, queue);
  1349. list_del(&res->queue);
  1350. ipr_init_res_entry(res, &cfgtew);
  1351. list_add_tail(&res->queue, &ioa_cfg->used_res_q);
  1352. }
  1353. ipr_update_res_entry(res, &cfgtew);
  1354. if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
  1355. if (res->sdev) {
  1356. res->del_from_ml = 1;
  1357. res->res_handle = IPR_INVALID_RES_HANDLE;
  1358. schedule_work(&ioa_cfg->work_q);
  1359. } else {
  1360. ipr_clear_res_target(res);
  1361. list_move_tail(&res->queue, &ioa_cfg->free_res_q);
  1362. }
  1363. } else if (!res->sdev || res->del_from_ml) {
  1364. res->add_to_ml = 1;
  1365. schedule_work(&ioa_cfg->work_q);
  1366. }
  1367. ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
  1368. }
  1369. /**
  1370. * ipr_process_ccn - Op done function for a CCN.
  1371. * @ipr_cmd: ipr command struct
  1372. *
  1373. * This function is the op done function for a configuration
  1374. * change notification host controlled async from the adapter.
  1375. *
  1376. * Return value:
  1377. * none
  1378. **/
  1379. static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
  1380. {
  1381. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  1382. struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
  1383. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  1384. list_del_init(&hostrcb->queue);
  1385. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  1386. if (ioasc) {
  1387. if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
  1388. ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
  1389. dev_err(&ioa_cfg->pdev->dev,
  1390. "Host RCB failed with IOASC: 0x%08X\n", ioasc);
  1391. ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
  1392. } else {
  1393. ipr_handle_config_change(ioa_cfg, hostrcb);
  1394. }
  1395. }
  1396. /**
1397. * strip_whitespace - Strip trailing whitespace.
  1398. * @i: size of buffer
  1399. * @buf: string to modify
  1400. *
  1401. * This function will strip all trailing whitespace and
  1402. * NUL terminate the string.
  1403. *
  1404. **/
  1405. static void strip_whitespace(int i, char *buf)
  1406. {
  1407. if (i < 1)
  1408. return;
  1409. i--;
  1410. while (i && buf[i] == ' ')
  1411. i--;
  1412. buf[i+1] = '\0';
  1413. }
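/*
* Example: given the 6-byte buffer "ABC   " and i == 6, strip_whitespace()
* leaves "ABC\0" in place. Note that @i is the buffer size, not a string
* length, and the buffer need not be NUL terminated on entry.
*/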
  1414. /**
  1415. * ipr_log_vpd_compact - Log the passed extended VPD compactly.
  1416. * @prefix: string to print at start of printk
  1417. * @hostrcb: hostrcb pointer
  1418. * @vpd: vendor/product id/sn struct
  1419. *
  1420. * Return value:
  1421. * none
  1422. **/
  1423. static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
  1424. struct ipr_vpd *vpd)
  1425. {
  1426. char vendor_id[IPR_VENDOR_ID_LEN + 1];
  1427. char product_id[IPR_PROD_ID_LEN + 1];
  1428. char sn[IPR_SERIAL_NUM_LEN + 1];
  1429. memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
  1430. strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
  1431. memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
  1432. strip_whitespace(IPR_PROD_ID_LEN, product_id);
  1433. memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
  1434. strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
  1435. ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
  1436. vendor_id, product_id, sn);
  1437. }
  1438. /**
  1439. * ipr_log_vpd - Log the passed VPD to the error log.
  1440. * @vpd: vendor/product id/sn struct
  1441. *
  1442. * Return value:
  1443. * none
  1444. **/
  1445. static void ipr_log_vpd(struct ipr_vpd *vpd)
  1446. {
  1447. char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
  1448. + IPR_SERIAL_NUM_LEN];
  1449. memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
  1450. memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
  1451. IPR_PROD_ID_LEN);
  1452. buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
  1453. ipr_err("Vendor/Product ID: %s\n", buffer);
  1454. memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
  1455. buffer[IPR_SERIAL_NUM_LEN] = '\0';
  1456. ipr_err(" Serial Number: %s\n", buffer);
  1457. }
  1458. /**
  1459. * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
  1460. * @prefix: string to print at start of printk
  1461. * @hostrcb: hostrcb pointer
  1462. * @vpd: vendor/product id/sn/wwn struct
  1463. *
  1464. * Return value:
  1465. * none
  1466. **/
  1467. static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
  1468. struct ipr_ext_vpd *vpd)
  1469. {
  1470. ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
  1471. ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
  1472. be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
  1473. }
  1474. /**
  1475. * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
  1476. * @vpd: vendor/product id/sn/wwn struct
  1477. *
  1478. * Return value:
  1479. * none
  1480. **/
  1481. static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
  1482. {
  1483. ipr_log_vpd(&vpd->vpd);
  1484. ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
  1485. be32_to_cpu(vpd->wwid[1]));
  1486. }
  1487. /**
  1488. * ipr_log_enhanced_cache_error - Log a cache error.
  1489. * @ioa_cfg: ioa config struct
  1490. * @hostrcb: hostrcb struct
  1491. *
  1492. * Return value:
  1493. * none
  1494. **/
  1495. static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
  1496. struct ipr_hostrcb *hostrcb)
  1497. {
  1498. struct ipr_hostrcb_type_12_error *error;
  1499. if (ioa_cfg->sis64)
  1500. error = &hostrcb->hcam.u.error64.u.type_12_error;
  1501. else
  1502. error = &hostrcb->hcam.u.error.u.type_12_error;
  1503. ipr_err("-----Current Configuration-----\n");
  1504. ipr_err("Cache Directory Card Information:\n");
  1505. ipr_log_ext_vpd(&error->ioa_vpd);
  1506. ipr_err("Adapter Card Information:\n");
  1507. ipr_log_ext_vpd(&error->cfc_vpd);
  1508. ipr_err("-----Expected Configuration-----\n");
  1509. ipr_err("Cache Directory Card Information:\n");
  1510. ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
  1511. ipr_err("Adapter Card Information:\n");
  1512. ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
  1513. ipr_err("Additional IOA Data: %08X %08X %08X\n",
  1514. be32_to_cpu(error->ioa_data[0]),
  1515. be32_to_cpu(error->ioa_data[1]),
  1516. be32_to_cpu(error->ioa_data[2]));
  1517. }
  1518. /**
  1519. * ipr_log_cache_error - Log a cache error.
  1520. * @ioa_cfg: ioa config struct
  1521. * @hostrcb: hostrcb struct
  1522. *
  1523. * Return value:
  1524. * none
  1525. **/
  1526. static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
  1527. struct ipr_hostrcb *hostrcb)
  1528. {
  1529. struct ipr_hostrcb_type_02_error *error =
  1530. &hostrcb->hcam.u.error.u.type_02_error;
  1531. ipr_err("-----Current Configuration-----\n");
  1532. ipr_err("Cache Directory Card Information:\n");
  1533. ipr_log_vpd(&error->ioa_vpd);
  1534. ipr_err("Adapter Card Information:\n");
  1535. ipr_log_vpd(&error->cfc_vpd);
  1536. ipr_err("-----Expected Configuration-----\n");
  1537. ipr_err("Cache Directory Card Information:\n");
  1538. ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
  1539. ipr_err("Adapter Card Information:\n");
  1540. ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
  1541. ipr_err("Additional IOA Data: %08X %08X %08X\n",
  1542. be32_to_cpu(error->ioa_data[0]),
  1543. be32_to_cpu(error->ioa_data[1]),
  1544. be32_to_cpu(error->ioa_data[2]));
  1545. }
  1546. /**
  1547. * ipr_log_enhanced_config_error - Log a configuration error.
  1548. * @ioa_cfg: ioa config struct
  1549. * @hostrcb: hostrcb struct
  1550. *
  1551. * Return value:
  1552. * none
  1553. **/
  1554. static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
  1555. struct ipr_hostrcb *hostrcb)
  1556. {
  1557. int errors_logged, i;
  1558. struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
  1559. struct ipr_hostrcb_type_13_error *error;
  1560. error = &hostrcb->hcam.u.error.u.type_13_error;
  1561. errors_logged = be32_to_cpu(error->errors_logged);
  1562. ipr_err("Device Errors Detected/Logged: %d/%d\n",
  1563. be32_to_cpu(error->errors_detected), errors_logged);
  1564. dev_entry = error->dev;
  1565. for (i = 0; i < errors_logged; i++, dev_entry++) {
  1566. ipr_err_separator;
  1567. ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
  1568. ipr_log_ext_vpd(&dev_entry->vpd);
  1569. ipr_err("-----New Device Information-----\n");
  1570. ipr_log_ext_vpd(&dev_entry->new_vpd);
  1571. ipr_err("Cache Directory Card Information:\n");
  1572. ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
  1573. ipr_err("Adapter Card Information:\n");
  1574. ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
  1575. }
  1576. }
  1577. /**
  1578. * ipr_log_sis64_config_error - Log a device error.
  1579. * @ioa_cfg: ioa config struct
  1580. * @hostrcb: hostrcb struct
  1581. *
  1582. * Return value:
  1583. * none
  1584. **/
  1585. static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
  1586. struct ipr_hostrcb *hostrcb)
  1587. {
  1588. int errors_logged, i;
  1589. struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
  1590. struct ipr_hostrcb_type_23_error *error;
  1591. char buffer[IPR_MAX_RES_PATH_LENGTH];
  1592. error = &hostrcb->hcam.u.error64.u.type_23_error;
  1593. errors_logged = be32_to_cpu(error->errors_logged);
  1594. ipr_err("Device Errors Detected/Logged: %d/%d\n",
  1595. be32_to_cpu(error->errors_detected), errors_logged);
  1596. dev_entry = error->dev;
  1597. for (i = 0; i < errors_logged; i++, dev_entry++) {
  1598. ipr_err_separator;
  1599. ipr_err("Device %d : %s", i + 1,
  1600. __ipr_format_res_path(dev_entry->res_path,
  1601. buffer, sizeof(buffer)));
  1602. ipr_log_ext_vpd(&dev_entry->vpd);
  1603. ipr_err("-----New Device Information-----\n");
  1604. ipr_log_ext_vpd(&dev_entry->new_vpd);
  1605. ipr_err("Cache Directory Card Information:\n");
  1606. ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
  1607. ipr_err("Adapter Card Information:\n");
  1608. ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
  1609. }
  1610. }
  1611. /**
  1612. * ipr_log_config_error - Log a configuration error.
  1613. * @ioa_cfg: ioa config struct
  1614. * @hostrcb: hostrcb struct
  1615. *
  1616. * Return value:
  1617. * none
  1618. **/
  1619. static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
  1620. struct ipr_hostrcb *hostrcb)
  1621. {
  1622. int errors_logged, i;
  1623. struct ipr_hostrcb_device_data_entry *dev_entry;
  1624. struct ipr_hostrcb_type_03_error *error;
  1625. error = &hostrcb->hcam.u.error.u.type_03_error;
  1626. errors_logged = be32_to_cpu(error->errors_logged);
  1627. ipr_err("Device Errors Detected/Logged: %d/%d\n",
  1628. be32_to_cpu(error->errors_detected), errors_logged);
  1629. dev_entry = error->dev;
  1630. for (i = 0; i < errors_logged; i++, dev_entry++) {
  1631. ipr_err_separator;
  1632. ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
  1633. ipr_log_vpd(&dev_entry->vpd);
  1634. ipr_err("-----New Device Information-----\n");
  1635. ipr_log_vpd(&dev_entry->new_vpd);
  1636. ipr_err("Cache Directory Card Information:\n");
  1637. ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
  1638. ipr_err("Adapter Card Information:\n");
  1639. ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
  1640. ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
  1641. be32_to_cpu(dev_entry->ioa_data[0]),
  1642. be32_to_cpu(dev_entry->ioa_data[1]),
  1643. be32_to_cpu(dev_entry->ioa_data[2]),
  1644. be32_to_cpu(dev_entry->ioa_data[3]),
  1645. be32_to_cpu(dev_entry->ioa_data[4]));
  1646. }
  1647. }
  1648. /**
  1649. * ipr_log_enhanced_array_error - Log an array configuration error.
  1650. * @ioa_cfg: ioa config struct
  1651. * @hostrcb: hostrcb struct
  1652. *
  1653. * Return value:
  1654. * none
  1655. **/
  1656. static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
  1657. struct ipr_hostrcb *hostrcb)
  1658. {
  1659. int i, num_entries;
  1660. struct ipr_hostrcb_type_14_error *error;
  1661. struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
  1662. const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
  1663. error = &hostrcb->hcam.u.error.u.type_14_error;
  1664. ipr_err_separator;
  1665. ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
  1666. error->protection_level,
  1667. ioa_cfg->host->host_no,
  1668. error->last_func_vset_res_addr.bus,
  1669. error->last_func_vset_res_addr.target,
  1670. error->last_func_vset_res_addr.lun);
  1671. ipr_err_separator;
  1672. array_entry = error->array_member;
  1673. num_entries = min_t(u32, be32_to_cpu(error->num_entries),
  1674. ARRAY_SIZE(error->array_member));
  1675. for (i = 0; i < num_entries; i++, array_entry++) {
  1676. if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
  1677. continue;
  1678. if (be32_to_cpu(error->exposed_mode_adn) == i)
  1679. ipr_err("Exposed Array Member %d:\n", i);
  1680. else
  1681. ipr_err("Array Member %d:\n", i);
  1682. ipr_log_ext_vpd(&array_entry->vpd);
  1683. ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
  1684. ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
  1685. "Expected Location");
  1686. ipr_err_separator;
  1687. }
  1688. }
  1689. /**
  1690. * ipr_log_array_error - Log an array configuration error.
  1691. * @ioa_cfg: ioa config struct
  1692. * @hostrcb: hostrcb struct
  1693. *
  1694. * Return value:
  1695. * none
  1696. **/
  1697. static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
  1698. struct ipr_hostrcb *hostrcb)
  1699. {
  1700. int i;
  1701. struct ipr_hostrcb_type_04_error *error;
  1702. struct ipr_hostrcb_array_data_entry *array_entry;
  1703. const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
  1704. error = &hostrcb->hcam.u.error.u.type_04_error;
  1705. ipr_err_separator;
  1706. ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
  1707. error->protection_level,
  1708. ioa_cfg->host->host_no,
  1709. error->last_func_vset_res_addr.bus,
  1710. error->last_func_vset_res_addr.target,
  1711. error->last_func_vset_res_addr.lun);
  1712. ipr_err_separator;
  1713. array_entry = error->array_member;
  1714. for (i = 0; i < 18; i++) {
  1715. if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
  1716. continue;
  1717. if (be32_to_cpu(error->exposed_mode_adn) == i)
  1718. ipr_err("Exposed Array Member %d:\n", i);
  1719. else
  1720. ipr_err("Array Member %d:\n", i);
  1721. ipr_log_vpd(&array_entry->vpd);
  1722. ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
  1723. ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
  1724. "Expected Location");
  1725. ipr_err_separator;
  1726. if (i == 9)
  1727. array_entry = error->array_member2;
  1728. else
  1729. array_entry++;
  1730. }
  1731. }
  1732. /**
  1733. * ipr_log_hex_data - Log additional hex IOA error data.
  1734. * @ioa_cfg: ioa config struct
  1735. * @data: IOA error data
  1736. * @len: data length
  1737. *
  1738. * Return value:
  1739. * none
  1740. **/
  1741. static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
  1742. {
  1743. int i;
  1744. if (len == 0)
  1745. return;
  1746. if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
  1747. len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
  1748. for (i = 0; i < len / 4; i += 4) {
  1749. ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
  1750. be32_to_cpu(data[i]),
  1751. be32_to_cpu(data[i+1]),
  1752. be32_to_cpu(data[i+2]),
  1753. be32_to_cpu(data[i+3]));
  1754. }
  1755. }
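/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the loop above consumes the error data four 32-bit words per pass, so
 * each printed line covers 16 bytes and the "%08X:" prefix is the byte
 * offset (i * 4).  A minimal user-space rendering of the same layout,
 * assuming the words have already been converted to host byte order and
 * mirroring the driver's rounding behaviour:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void hex_dump_sketch(const uint32_t *data, int len_in_bytes)
{
	int i;

	for (i = 0; i < len_in_bytes / 4; i += 4)
		printf("%08X: %08X %08X %08X %08X\n", i * 4,
		       data[i], data[i + 1], data[i + 2], data[i + 3]);
}
#endif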
  1756. /**
  1757. * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
  1758. * @ioa_cfg: ioa config struct
  1759. * @hostrcb: hostrcb struct
  1760. *
  1761. * Return value:
  1762. * none
  1763. **/
  1764. static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
  1765. struct ipr_hostrcb *hostrcb)
  1766. {
  1767. struct ipr_hostrcb_type_17_error *error;
  1768. if (ioa_cfg->sis64)
  1769. error = &hostrcb->hcam.u.error64.u.type_17_error;
  1770. else
  1771. error = &hostrcb->hcam.u.error.u.type_17_error;
  1772. error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
  1773. strim(error->failure_reason);
  1774. ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
  1775. be32_to_cpu(hostrcb->hcam.u.error.prc));
  1776. ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
  1777. ipr_log_hex_data(ioa_cfg, error->data,
  1778. be32_to_cpu(hostrcb->hcam.length) -
  1779. (offsetof(struct ipr_hostrcb_error, u) +
  1780. offsetof(struct ipr_hostrcb_type_17_error, data)));
  1781. }
  1782. /**
  1783. * ipr_log_dual_ioa_error - Log a dual adapter error.
  1784. * @ioa_cfg: ioa config struct
  1785. * @hostrcb: hostrcb struct
  1786. *
  1787. * Return value:
  1788. * none
  1789. **/
  1790. static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
  1791. struct ipr_hostrcb *hostrcb)
  1792. {
  1793. struct ipr_hostrcb_type_07_error *error;
  1794. error = &hostrcb->hcam.u.error.u.type_07_error;
  1795. error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
  1796. strim(error->failure_reason);
  1797. ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
  1798. be32_to_cpu(hostrcb->hcam.u.error.prc));
  1799. ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
  1800. ipr_log_hex_data(ioa_cfg, error->data,
  1801. be32_to_cpu(hostrcb->hcam.length) -
  1802. (offsetof(struct ipr_hostrcb_error, u) +
  1803. offsetof(struct ipr_hostrcb_type_07_error, data)));
  1804. }
  1805. static const struct {
  1806. u8 active;
  1807. char *desc;
  1808. } path_active_desc[] = {
  1809. { IPR_PATH_NO_INFO, "Path" },
  1810. { IPR_PATH_ACTIVE, "Active path" },
  1811. { IPR_PATH_NOT_ACTIVE, "Inactive path" }
  1812. };
  1813. static const struct {
  1814. u8 state;
  1815. char *desc;
  1816. } path_state_desc[] = {
  1817. { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
  1818. { IPR_PATH_HEALTHY, "is healthy" },
  1819. { IPR_PATH_DEGRADED, "is degraded" },
  1820. { IPR_PATH_FAILED, "is failed" }
  1821. };
  1822. /**
  1823. * ipr_log_fabric_path - Log a fabric path error
  1824. * @hostrcb: hostrcb struct
  1825. * @fabric: fabric descriptor
  1826. *
  1827. * Return value:
  1828. * none
  1829. **/
  1830. static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
  1831. struct ipr_hostrcb_fabric_desc *fabric)
  1832. {
  1833. int i, j;
  1834. u8 path_state = fabric->path_state;
  1835. u8 active = path_state & IPR_PATH_ACTIVE_MASK;
  1836. u8 state = path_state & IPR_PATH_STATE_MASK;
  1837. for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
  1838. if (path_active_desc[i].active != active)
  1839. continue;
  1840. for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
  1841. if (path_state_desc[j].state != state)
  1842. continue;
  1843. if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
  1844. ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
  1845. path_active_desc[i].desc, path_state_desc[j].desc,
  1846. fabric->ioa_port);
  1847. } else if (fabric->cascaded_expander == 0xff) {
  1848. ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
  1849. path_active_desc[i].desc, path_state_desc[j].desc,
  1850. fabric->ioa_port, fabric->phy);
  1851. } else if (fabric->phy == 0xff) {
  1852. ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
  1853. path_active_desc[i].desc, path_state_desc[j].desc,
  1854. fabric->ioa_port, fabric->cascaded_expander);
  1855. } else {
  1856. ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
  1857. path_active_desc[i].desc, path_state_desc[j].desc,
  1858. fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
  1859. }
  1860. return;
  1861. }
  1862. }
  1863. ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
  1864. fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
  1865. }
  1866. /**
  1867. * ipr_log64_fabric_path - Log a fabric path error
  1868. * @hostrcb: hostrcb struct
  1869. * @fabric: fabric descriptor
  1870. *
  1871. * Return value:
  1872. * none
  1873. **/
  1874. static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
  1875. struct ipr_hostrcb64_fabric_desc *fabric)
  1876. {
  1877. int i, j;
  1878. u8 path_state = fabric->path_state;
  1879. u8 active = path_state & IPR_PATH_ACTIVE_MASK;
  1880. u8 state = path_state & IPR_PATH_STATE_MASK;
  1881. char buffer[IPR_MAX_RES_PATH_LENGTH];
  1882. for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
  1883. if (path_active_desc[i].active != active)
  1884. continue;
  1885. for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
  1886. if (path_state_desc[j].state != state)
  1887. continue;
  1888. ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
  1889. path_active_desc[i].desc, path_state_desc[j].desc,
  1890. ipr_format_res_path(hostrcb->ioa_cfg,
  1891. fabric->res_path,
  1892. buffer, sizeof(buffer)));
  1893. return;
  1894. }
  1895. }
  1896. ipr_err("Path state=%02X Resource Path=%s\n", path_state,
  1897. ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
  1898. buffer, sizeof(buffer)));
  1899. }
  1900. static const struct {
  1901. u8 type;
  1902. char *desc;
  1903. } path_type_desc[] = {
  1904. { IPR_PATH_CFG_IOA_PORT, "IOA port" },
  1905. { IPR_PATH_CFG_EXP_PORT, "Expander port" },
  1906. { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
  1907. { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
  1908. };
  1909. static const struct {
  1910. u8 status;
  1911. char *desc;
  1912. } path_status_desc[] = {
  1913. { IPR_PATH_CFG_NO_PROB, "Functional" },
  1914. { IPR_PATH_CFG_DEGRADED, "Degraded" },
  1915. { IPR_PATH_CFG_FAILED, "Failed" },
  1916. { IPR_PATH_CFG_SUSPECT, "Suspect" },
  1917. { IPR_PATH_NOT_DETECTED, "Missing" },
  1918. { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
  1919. };
  1920. static const char *link_rate[] = {
  1921. "unknown",
  1922. "disabled",
  1923. "phy reset problem",
  1924. "spinup hold",
  1925. "port selector",
  1926. "unknown",
  1927. "unknown",
  1928. "unknown",
  1929. "1.5Gbps",
  1930. "3.0Gbps",
  1931. "unknown",
  1932. "unknown",
  1933. "unknown",
  1934. "unknown",
  1935. "unknown",
  1936. "unknown"
  1937. };
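/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the 16-entry table above is indexed with the low nibble of a config
 * element's link_rate field, so decoding a raw value is a mask plus a
 * lookup.  IPR_PHY_LINK_RATE_MASK is assumed to be 0x0f here, matching
 * the table size.
 */
#if 0
static const char *decode_link_rate_sketch(u8 raw_link_rate)
{
	return link_rate[raw_link_rate & 0x0f];
}
#endif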
  1938. /**
  1939. * ipr_log_path_elem - Log a fabric path element.
  1940. * @hostrcb: hostrcb struct
  1941. * @cfg: fabric path element struct
  1942. *
  1943. * Return value:
  1944. * none
  1945. **/
  1946. static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
  1947. struct ipr_hostrcb_config_element *cfg)
  1948. {
  1949. int i, j;
  1950. u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
  1951. u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
  1952. if (type == IPR_PATH_CFG_NOT_EXIST)
  1953. return;
  1954. for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
  1955. if (path_type_desc[i].type != type)
  1956. continue;
  1957. for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
  1958. if (path_status_desc[j].status != status)
  1959. continue;
  1960. if (type == IPR_PATH_CFG_IOA_PORT) {
  1961. ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
  1962. path_status_desc[j].desc, path_type_desc[i].desc,
  1963. cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  1964. be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
  1965. } else {
  1966. if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
  1967. ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
  1968. path_status_desc[j].desc, path_type_desc[i].desc,
  1969. link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  1970. be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
  1971. } else if (cfg->cascaded_expander == 0xff) {
  1972. ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
  1973. "WWN=%08X%08X\n", path_status_desc[j].desc,
  1974. path_type_desc[i].desc, cfg->phy,
  1975. link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  1976. be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
  1977. } else if (cfg->phy == 0xff) {
  1978. ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
  1979. "WWN=%08X%08X\n", path_status_desc[j].desc,
  1980. path_type_desc[i].desc, cfg->cascaded_expander,
  1981. link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  1982. be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
  1983. } else {
  1984. ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
  1985. "WWN=%08X%08X\n", path_status_desc[j].desc,
  1986. path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
  1987. link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  1988. be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
  1989. }
  1990. }
  1991. return;
  1992. }
  1993. }
  1994. ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
  1995. "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
  1996. link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  1997. be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
  1998. }
  1999. /**
  2000. * ipr_log64_path_elem - Log a fabric path element.
  2001. * @hostrcb: hostrcb struct
  2002. * @cfg: fabric path element struct
  2003. *
  2004. * Return value:
  2005. * none
  2006. **/
  2007. static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
  2008. struct ipr_hostrcb64_config_element *cfg)
  2009. {
  2010. int i, j;
  2011. u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
  2012. u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
  2013. u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
  2014. char buffer[IPR_MAX_RES_PATH_LENGTH];
  2015. if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
  2016. return;
  2017. for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
  2018. if (path_type_desc[i].type != type)
  2019. continue;
  2020. for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
  2021. if (path_status_desc[j].status != status)
  2022. continue;
  2023. ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
  2024. path_status_desc[j].desc, path_type_desc[i].desc,
  2025. ipr_format_res_path(hostrcb->ioa_cfg,
  2026. cfg->res_path, buffer, sizeof(buffer)),
  2027. link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  2028. be32_to_cpu(cfg->wwid[0]),
  2029. be32_to_cpu(cfg->wwid[1]));
  2030. return;
  2031. }
  2032. }
  2033. ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
  2034. "WWN=%08X%08X\n", cfg->type_status,
  2035. ipr_format_res_path(hostrcb->ioa_cfg,
  2036. cfg->res_path, buffer, sizeof(buffer)),
  2037. link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
  2038. be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
  2039. }
  2040. /**
  2041. * ipr_log_fabric_error - Log a fabric error.
  2042. * @ioa_cfg: ioa config struct
  2043. * @hostrcb: hostrcb struct
  2044. *
  2045. * Return value:
  2046. * none
  2047. **/
  2048. static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
  2049. struct ipr_hostrcb *hostrcb)
  2050. {
  2051. struct ipr_hostrcb_type_20_error *error;
  2052. struct ipr_hostrcb_fabric_desc *fabric;
  2053. struct ipr_hostrcb_config_element *cfg;
  2054. int i, add_len;
  2055. error = &hostrcb->hcam.u.error.u.type_20_error;
  2056. error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
  2057. ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
  2058. add_len = be32_to_cpu(hostrcb->hcam.length) -
  2059. (offsetof(struct ipr_hostrcb_error, u) +
  2060. offsetof(struct ipr_hostrcb_type_20_error, desc));
  2061. for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
  2062. ipr_log_fabric_path(hostrcb, fabric);
  2063. for_each_fabric_cfg(fabric, cfg)
  2064. ipr_log_path_elem(hostrcb, cfg);
  2065. add_len -= be16_to_cpu(fabric->length);
  2066. fabric = (struct ipr_hostrcb_fabric_desc *)
  2067. ((unsigned long)fabric + be16_to_cpu(fabric->length));
  2068. }
  2069. ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
  2070. }
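/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the fabric descriptors are variable-length records packed back to back,
 * so the loop above advances by each record's big-endian length field and
 * hands whatever trails the last record to the hex dumper.  The walk in
 * generic form, assuming a record that begins with a __be16 total length:
 */
#if 0
struct record_sketch {
	__be16 length;		/* total record length in bytes */
	u8 payload[];
};

static void walk_records_sketch(void *buf, int num_records)
{
	struct record_sketch *rec = buf;
	int i;

	for (i = 0; i < num_records; i++) {
		/* ... consume rec here ... */
		rec = (void *)((unsigned long)rec + be16_to_cpu(rec->length));
	}
}
#endif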
  2071. /**
  2072. * ipr_log_sis64_array_error - Log a sis64 array error.
  2073. * @ioa_cfg: ioa config struct
  2074. * @hostrcb: hostrcb struct
  2075. *
  2076. * Return value:
  2077. * none
  2078. **/
  2079. static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
  2080. struct ipr_hostrcb *hostrcb)
  2081. {
  2082. int i, num_entries;
  2083. struct ipr_hostrcb_type_24_error *error;
  2084. struct ipr_hostrcb64_array_data_entry *array_entry;
  2085. char buffer[IPR_MAX_RES_PATH_LENGTH];
  2086. const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
  2087. error = &hostrcb->hcam.u.error64.u.type_24_error;
  2088. ipr_err_separator;
  2089. ipr_err("RAID %s Array Configuration: %s\n",
  2090. error->protection_level,
  2091. ipr_format_res_path(ioa_cfg, error->last_res_path,
  2092. buffer, sizeof(buffer)));
  2093. ipr_err_separator;
  2094. array_entry = error->array_member;
  2095. num_entries = min_t(u32, error->num_entries,
  2096. ARRAY_SIZE(error->array_member));
  2097. for (i = 0; i < num_entries; i++, array_entry++) {
  2098. if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
  2099. continue;
  2100. if (error->exposed_mode_adn == i)
  2101. ipr_err("Exposed Array Member %d:\n", i);
  2102. else
  2103. ipr_err("Array Member %d:\n", i);
  2104. ipr_err("Array Member %d:\n", i);
  2105. ipr_log_ext_vpd(&array_entry->vpd);
  2106. ipr_err("Current Location: %s\n",
  2107. ipr_format_res_path(ioa_cfg, array_entry->res_path,
  2108. buffer, sizeof(buffer)));
  2109. ipr_err("Expected Location: %s\n",
  2110. ipr_format_res_path(ioa_cfg,
  2111. array_entry->expected_res_path,
  2112. buffer, sizeof(buffer)));
  2113. ipr_err_separator;
  2114. }
  2115. }
  2116. /**
  2117. * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
  2118. * @ioa_cfg: ioa config struct
  2119. * @hostrcb: hostrcb struct
  2120. *
  2121. * Return value:
  2122. * none
  2123. **/
  2124. static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
  2125. struct ipr_hostrcb *hostrcb)
  2126. {
  2127. struct ipr_hostrcb_type_30_error *error;
  2128. struct ipr_hostrcb64_fabric_desc *fabric;
  2129. struct ipr_hostrcb64_config_element *cfg;
  2130. int i, add_len;
  2131. error = &hostrcb->hcam.u.error64.u.type_30_error;
  2132. error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
  2133. ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
  2134. add_len = be32_to_cpu(hostrcb->hcam.length) -
  2135. (offsetof(struct ipr_hostrcb64_error, u) +
  2136. offsetof(struct ipr_hostrcb_type_30_error, desc));
  2137. for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
  2138. ipr_log64_fabric_path(hostrcb, fabric);
  2139. for_each_fabric_cfg(fabric, cfg)
  2140. ipr_log64_path_elem(hostrcb, cfg);
  2141. add_len -= be16_to_cpu(fabric->length);
  2142. fabric = (struct ipr_hostrcb64_fabric_desc *)
  2143. ((unsigned long)fabric + be16_to_cpu(fabric->length));
  2144. }
  2145. ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
  2146. }
  2147. /**
  2148. * ipr_log_sis64_service_required_error - Log a sis64 service required error.
  2149. * @ioa_cfg: ioa config struct
  2150. * @hostrcb: hostrcb struct
  2151. *
  2152. * Return value:
  2153. * none
  2154. **/
  2155. static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
  2156. struct ipr_hostrcb *hostrcb)
  2157. {
  2158. struct ipr_hostrcb_type_41_error *error;
  2159. error = &hostrcb->hcam.u.error64.u.type_41_error;
  2160. error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
  2161. ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
  2162. ipr_log_hex_data(ioa_cfg, error->data,
  2163. be32_to_cpu(hostrcb->hcam.length) -
2164. (offsetof(struct ipr_hostrcb64_error, u) +
  2165. offsetof(struct ipr_hostrcb_type_41_error, data)));
  2166. }
  2167. /**
  2168. * ipr_log_generic_error - Log an adapter error.
  2169. * @ioa_cfg: ioa config struct
  2170. * @hostrcb: hostrcb struct
  2171. *
  2172. * Return value:
  2173. * none
  2174. **/
  2175. static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
  2176. struct ipr_hostrcb *hostrcb)
  2177. {
  2178. ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
  2179. be32_to_cpu(hostrcb->hcam.length));
  2180. }
  2181. /**
2182. * ipr_log_sis64_device_error - Log a sis64 device error.
  2183. * @ioa_cfg: ioa config struct
  2184. * @hostrcb: hostrcb struct
  2185. *
  2186. * Return value:
  2187. * none
  2188. **/
  2189. static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
  2190. struct ipr_hostrcb *hostrcb)
  2191. {
  2192. struct ipr_hostrcb_type_21_error *error;
  2193. char buffer[IPR_MAX_RES_PATH_LENGTH];
  2194. error = &hostrcb->hcam.u.error64.u.type_21_error;
  2195. ipr_err("-----Failing Device Information-----\n");
  2196. ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
  2197. be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
  2198. be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
  2199. ipr_err("Device Resource Path: %s\n",
  2200. __ipr_format_res_path(error->res_path,
  2201. buffer, sizeof(buffer)));
  2202. error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
  2203. error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
  2204. ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
  2205. ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
  2206. ipr_err("SCSI Sense Data:\n");
  2207. ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
  2208. ipr_err("SCSI Command Descriptor Block: \n");
  2209. ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
  2210. ipr_err("Additional IOA Data:\n");
  2211. ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
  2212. }
  2213. /**
2214. * ipr_get_error - Find the specified IOASC in the ipr_error_table.
  2215. * @ioasc: IOASC
  2216. *
2217. * This function will return the index into the ipr_error_table
  2218. * for the specified IOASC. If the IOASC is not in the table,
  2219. * 0 will be returned, which points to the entry used for unknown errors.
  2220. *
  2221. * Return value:
  2222. * index into the ipr_error_table
  2223. **/
  2224. static u32 ipr_get_error(u32 ioasc)
  2225. {
  2226. int i;
  2227. for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
  2228. if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
  2229. return i;
  2230. return 0;
  2231. }
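/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the lookup above applies IPR_IOASC_IOASC_MASK before comparing and falls
 * back to entry 0 (the "unknown error" entry) when nothing matches.  The
 * same pattern in miniature, with a made-up two-entry table and a
 * hypothetical IOASC value:
 */
#if 0
struct err_entry_sketch { u32 ioasc; const char *text; };

static const struct err_entry_sketch err_table_sketch[] = {
	{ 0x00000000, "unknown error" },	/* index 0 is the fallback */
	{ 0x01080000, "example error" },	/* hypothetical IOASC */
};

static u32 lookup_error_sketch(u32 ioasc, u32 mask)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(err_table_sketch); i++)
		if (err_table_sketch[i].ioasc == (ioasc & mask))
			return i;
	return 0;
}
#endif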
  2232. /**
  2233. * ipr_handle_log_data - Log an adapter error.
  2234. * @ioa_cfg: ioa config struct
  2235. * @hostrcb: hostrcb struct
  2236. *
  2237. * This function logs an adapter error to the system.
  2238. *
  2239. * Return value:
  2240. * none
  2241. **/
  2242. static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
  2243. struct ipr_hostrcb *hostrcb)
  2244. {
  2245. u32 ioasc;
  2246. int error_index;
  2247. struct ipr_hostrcb_type_21_error *error;
  2248. if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
  2249. return;
  2250. if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
  2251. dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
  2252. if (ioa_cfg->sis64)
  2253. ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
  2254. else
  2255. ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
  2256. if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
  2257. ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
  2258. /* Tell the midlayer we had a bus reset so it will handle the UA properly */
  2259. scsi_report_bus_reset(ioa_cfg->host,
  2260. hostrcb->hcam.u.error.fd_res_addr.bus);
  2261. }
  2262. error_index = ipr_get_error(ioasc);
  2263. if (!ipr_error_table[error_index].log_hcam)
  2264. return;
  2265. if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
  2266. hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
  2267. error = &hostrcb->hcam.u.error64.u.type_21_error;
  2268. if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
  2269. ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
  2270. return;
  2271. }
  2272. ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
  2273. /* Set indication we have logged an error */
  2274. ioa_cfg->errors_logged++;
  2275. if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
  2276. return;
  2277. if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
  2278. hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
  2279. switch (hostrcb->hcam.overlay_id) {
  2280. case IPR_HOST_RCB_OVERLAY_ID_2:
  2281. ipr_log_cache_error(ioa_cfg, hostrcb);
  2282. break;
  2283. case IPR_HOST_RCB_OVERLAY_ID_3:
  2284. ipr_log_config_error(ioa_cfg, hostrcb);
  2285. break;
  2286. case IPR_HOST_RCB_OVERLAY_ID_4:
  2287. case IPR_HOST_RCB_OVERLAY_ID_6:
  2288. ipr_log_array_error(ioa_cfg, hostrcb);
  2289. break;
  2290. case IPR_HOST_RCB_OVERLAY_ID_7:
  2291. ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
  2292. break;
  2293. case IPR_HOST_RCB_OVERLAY_ID_12:
  2294. ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
  2295. break;
  2296. case IPR_HOST_RCB_OVERLAY_ID_13:
  2297. ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
  2298. break;
  2299. case IPR_HOST_RCB_OVERLAY_ID_14:
  2300. case IPR_HOST_RCB_OVERLAY_ID_16:
  2301. ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
  2302. break;
  2303. case IPR_HOST_RCB_OVERLAY_ID_17:
  2304. ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
  2305. break;
  2306. case IPR_HOST_RCB_OVERLAY_ID_20:
  2307. ipr_log_fabric_error(ioa_cfg, hostrcb);
  2308. break;
  2309. case IPR_HOST_RCB_OVERLAY_ID_21:
  2310. ipr_log_sis64_device_error(ioa_cfg, hostrcb);
  2311. break;
  2312. case IPR_HOST_RCB_OVERLAY_ID_23:
  2313. ipr_log_sis64_config_error(ioa_cfg, hostrcb);
  2314. break;
  2315. case IPR_HOST_RCB_OVERLAY_ID_24:
  2316. case IPR_HOST_RCB_OVERLAY_ID_26:
  2317. ipr_log_sis64_array_error(ioa_cfg, hostrcb);
  2318. break;
  2319. case IPR_HOST_RCB_OVERLAY_ID_30:
  2320. ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
  2321. break;
  2322. case IPR_HOST_RCB_OVERLAY_ID_41:
  2323. ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
  2324. break;
  2325. case IPR_HOST_RCB_OVERLAY_ID_1:
  2326. case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
  2327. default:
  2328. ipr_log_generic_error(ioa_cfg, hostrcb);
  2329. break;
  2330. }
  2331. }
  2332. static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
  2333. {
  2334. struct ipr_hostrcb *hostrcb;
  2335. hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
  2336. struct ipr_hostrcb, queue);
  2337. if (unlikely(!hostrcb)) {
  2338. dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
  2339. hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
  2340. struct ipr_hostrcb, queue);
  2341. }
  2342. list_del_init(&hostrcb->queue);
  2343. return hostrcb;
  2344. }
  2345. /**
  2346. * ipr_process_error - Op done function for an adapter error log.
  2347. * @ipr_cmd: ipr command struct
  2348. *
2349. * This function is the op done function for an error log host
2350. * controlled async (HCAM) from the adapter. It will log the error and
  2351. * send the HCAM back to the adapter.
  2352. *
  2353. * Return value:
  2354. * none
  2355. **/
  2356. static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
  2357. {
  2358. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  2359. struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
  2360. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  2361. u32 fd_ioasc;
  2362. if (ioa_cfg->sis64)
  2363. fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
  2364. else
  2365. fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
  2366. list_del_init(&hostrcb->queue);
  2367. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  2368. if (!ioasc) {
  2369. ipr_handle_log_data(ioa_cfg, hostrcb);
  2370. if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
  2371. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
  2372. } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
  2373. ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
  2374. dev_err(&ioa_cfg->pdev->dev,
  2375. "Host RCB failed with IOASC: 0x%08X\n", ioasc);
  2376. }
  2377. list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
  2378. schedule_work(&ioa_cfg->work_q);
  2379. hostrcb = ipr_get_free_hostrcb(ioa_cfg);
  2380. ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
  2381. }
  2382. /**
  2383. * ipr_timeout - An internally generated op has timed out.
  2384. * @t: Timer context used to fetch ipr command struct
  2385. *
  2386. * This function blocks host requests and initiates an
  2387. * adapter reset.
  2388. *
  2389. * Return value:
  2390. * none
  2391. **/
  2392. static void ipr_timeout(struct timer_list *t)
  2393. {
  2394. struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
  2395. unsigned long lock_flags = 0;
  2396. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  2397. ENTER;
  2398. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2399. ioa_cfg->errors_logged++;
  2400. dev_err(&ioa_cfg->pdev->dev,
  2401. "Adapter being reset due to command timeout.\n");
  2402. if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
  2403. ioa_cfg->sdt_state = GET_DUMP;
  2404. if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
  2405. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  2406. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2407. LEAVE;
  2408. }
  2409. /**
  2410. * ipr_oper_timeout - Adapter timed out transitioning to operational
  2411. * @t: Timer context used to fetch ipr command struct
  2412. *
  2413. * This function blocks host requests and initiates an
  2414. * adapter reset.
  2415. *
  2416. * Return value:
  2417. * none
  2418. **/
  2419. static void ipr_oper_timeout(struct timer_list *t)
  2420. {
  2421. struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
  2422. unsigned long lock_flags = 0;
  2423. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  2424. ENTER;
  2425. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2426. ioa_cfg->errors_logged++;
  2427. dev_err(&ioa_cfg->pdev->dev,
  2428. "Adapter timed out transitioning to operational.\n");
  2429. if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
  2430. ioa_cfg->sdt_state = GET_DUMP;
  2431. if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
  2432. if (ipr_fastfail)
  2433. ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
  2434. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  2435. }
  2436. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2437. LEAVE;
  2438. }
  2439. /**
  2440. * ipr_find_ses_entry - Find matching SES in SES table
  2441. * @res: resource entry struct of SES
  2442. *
  2443. * Return value:
  2444. * pointer to SES table entry / NULL on failure
  2445. **/
  2446. static const struct ipr_ses_table_entry *
  2447. ipr_find_ses_entry(struct ipr_resource_entry *res)
  2448. {
  2449. int i, j, matches;
  2450. struct ipr_std_inq_vpids *vpids;
  2451. const struct ipr_ses_table_entry *ste = ipr_ses_table;
  2452. for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
  2453. for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
  2454. if (ste->compare_product_id_byte[j] == 'X') {
  2455. vpids = &res->std_inq_data.vpids;
  2456. if (vpids->product_id[j] == ste->product_id[j])
  2457. matches++;
  2458. else
  2459. break;
  2460. } else
  2461. matches++;
  2462. }
  2463. if (matches == IPR_PROD_ID_LEN)
  2464. return ste;
  2465. }
  2466. return NULL;
  2467. }
  2468. /**
  2469. * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
  2470. * @ioa_cfg: ioa config struct
  2471. * @bus: SCSI bus
  2472. * @bus_width: bus width
  2473. *
  2474. * Return value:
  2475. * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
  2476. * For a 2-byte wide SCSI bus, the maximum transfer speed is
  2477. * twice the maximum transfer rate (e.g. for a wide enabled bus,
  2478. * max 160MHz = max 320MB/sec).
  2479. **/
  2480. static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
  2481. {
  2482. struct ipr_resource_entry *res;
  2483. const struct ipr_ses_table_entry *ste;
  2484. u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
  2485. /* Loop through each config table entry in the config table buffer */
  2486. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  2487. if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
  2488. continue;
  2489. if (bus != res->bus)
  2490. continue;
  2491. if (!(ste = ipr_find_ses_entry(res)))
  2492. continue;
  2493. max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
  2494. }
  2495. return max_xfer_rate;
  2496. }
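/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the return value is in units of 100 kHz, and the SES table limit appears
 * to be expressed in MB/s (an assumption based on the arithmetic above), so
 * a 16-bit wide bus capped at 160 MB/s works out to (160 * 10) / (16 / 8) =
 * 800, i.e. an 80 MHz bus.  The conversion in isolation:
 */
#if 0
static u32 scsi_speed_sketch(u32 limit_mb_per_sec, u8 bus_width_bits)
{
	/* 100 kHz units: MB/s scaled by 10, divided by bus width in bytes */
	return (limit_mb_per_sec * 10) / (bus_width_bits / 8);
}
#endif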
  2497. /**
  2498. * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
  2499. * @ioa_cfg: ioa config struct
  2500. * @max_delay: max delay in micro-seconds to wait
  2501. *
  2502. * Waits for an IODEBUG ACK from the IOA, doing busy looping.
  2503. *
  2504. * Return value:
  2505. * 0 on success / other on failure
  2506. **/
  2507. static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
  2508. {
  2509. volatile u32 pcii_reg;
  2510. int delay = 1;
  2511. /* Read interrupt reg until IOA signals IO Debug Acknowledge */
  2512. while (delay < max_delay) {
  2513. pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
  2514. if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
  2515. return 0;
  2516. /* udelay cannot be used if delay is more than a few milliseconds */
  2517. if ((delay / 1000) > MAX_UDELAY_MS)
  2518. mdelay(delay / 1000);
  2519. else
  2520. udelay(delay);
  2521. delay += delay;
  2522. }
  2523. return -EIO;
  2524. }
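/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the wait above polls with an exponentially growing delay (1, 2, 4, ...
 * microseconds) until the acknowledge bit appears or the next delay would
 * reach max_delay, switching from udelay() to mdelay() for waits beyond a
 * few milliseconds.  The backoff skeleton on its own, ignoring that switch:
 */
#if 0
static int poll_with_backoff_sketch(int (*done)(void *), void *arg,
				    int max_delay_us)
{
	int delay = 1;

	while (delay < max_delay_us) {
		if (done(arg))
			return 0;
		udelay(delay);
		delay += delay;		/* double the wait each round */
	}
	return -EIO;
}
#endif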
  2525. /**
  2526. * ipr_get_sis64_dump_data_section - Dump IOA memory
  2527. * @ioa_cfg: ioa config struct
  2528. * @start_addr: adapter address to dump
  2529. * @dest: destination kernel buffer
  2530. * @length_in_words: length to dump in 4 byte words
  2531. *
  2532. * Return value:
  2533. * 0 on success
  2534. **/
  2535. static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
  2536. u32 start_addr,
  2537. __be32 *dest, u32 length_in_words)
  2538. {
  2539. int i;
  2540. for (i = 0; i < length_in_words; i++) {
  2541. writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
  2542. *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
  2543. dest++;
  2544. }
  2545. return 0;
  2546. }
  2547. /**
  2548. * ipr_get_ldump_data_section - Dump IOA memory
  2549. * @ioa_cfg: ioa config struct
  2550. * @start_addr: adapter address to dump
  2551. * @dest: destination kernel buffer
  2552. * @length_in_words: length to dump in 4 byte words
  2553. *
  2554. * Return value:
  2555. * 0 on success / -EIO on failure
  2556. **/
  2557. static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
  2558. u32 start_addr,
  2559. __be32 *dest, u32 length_in_words)
  2560. {
  2561. volatile u32 temp_pcii_reg;
  2562. int i, delay = 0;
  2563. if (ioa_cfg->sis64)
  2564. return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
  2565. dest, length_in_words);
  2566. /* Write IOA interrupt reg starting LDUMP state */
  2567. writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
  2568. ioa_cfg->regs.set_uproc_interrupt_reg32);
  2569. /* Wait for IO debug acknowledge */
  2570. if (ipr_wait_iodbg_ack(ioa_cfg,
  2571. IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
  2572. dev_err(&ioa_cfg->pdev->dev,
  2573. "IOA dump long data transfer timeout\n");
  2574. return -EIO;
  2575. }
  2576. /* Signal LDUMP interlocked - clear IO debug ack */
  2577. writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
  2578. ioa_cfg->regs.clr_interrupt_reg);
  2579. /* Write Mailbox with starting address */
  2580. writel(start_addr, ioa_cfg->ioa_mailbox);
  2581. /* Signal address valid - clear IOA Reset alert */
  2582. writel(IPR_UPROCI_RESET_ALERT,
  2583. ioa_cfg->regs.clr_uproc_interrupt_reg32);
  2584. for (i = 0; i < length_in_words; i++) {
  2585. /* Wait for IO debug acknowledge */
  2586. if (ipr_wait_iodbg_ack(ioa_cfg,
  2587. IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
  2588. dev_err(&ioa_cfg->pdev->dev,
  2589. "IOA dump short data transfer timeout\n");
  2590. return -EIO;
  2591. }
  2592. /* Read data from mailbox and increment destination pointer */
  2593. *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
  2594. dest++;
  2595. /* For all but the last word of data, signal data received */
  2596. if (i < (length_in_words - 1)) {
  2597. /* Signal dump data received - Clear IO debug Ack */
  2598. writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
  2599. ioa_cfg->regs.clr_interrupt_reg);
  2600. }
  2601. }
  2602. /* Signal end of block transfer. Set reset alert then clear IO debug ack */
  2603. writel(IPR_UPROCI_RESET_ALERT,
  2604. ioa_cfg->regs.set_uproc_interrupt_reg32);
  2605. writel(IPR_UPROCI_IO_DEBUG_ALERT,
  2606. ioa_cfg->regs.clr_uproc_interrupt_reg32);
  2607. /* Signal dump data received - Clear IO debug Ack */
  2608. writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
  2609. ioa_cfg->regs.clr_interrupt_reg);
  2610. /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
  2611. while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
  2612. temp_pcii_reg =
  2613. readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
  2614. if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
  2615. return 0;
  2616. udelay(10);
  2617. delay += 10;
  2618. }
  2619. return 0;
  2620. }
  2621. #ifdef CONFIG_SCSI_IPR_DUMP
  2622. /**
  2623. * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
  2624. * @ioa_cfg: ioa config struct
  2625. * @pci_address: adapter address
  2626. * @length: length of data to copy
  2627. *
  2628. * Copy data from PCI adapter to kernel buffer.
  2629. * Note: length MUST be a 4 byte multiple
  2630. * Return value:
  2631. * 0 on success / other on failure
  2632. **/
  2633. static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
  2634. unsigned long pci_address, u32 length)
  2635. {
  2636. int bytes_copied = 0;
  2637. int cur_len, rc, rem_len, rem_page_len, max_dump_size;
  2638. __be32 *page;
  2639. unsigned long lock_flags = 0;
  2640. struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
  2641. if (ioa_cfg->sis64)
  2642. max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
  2643. else
  2644. max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
  2645. while (bytes_copied < length &&
  2646. (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
  2647. if (ioa_dump->page_offset >= PAGE_SIZE ||
  2648. ioa_dump->page_offset == 0) {
  2649. page = (__be32 *)__get_free_page(GFP_ATOMIC);
  2650. if (!page) {
  2651. ipr_trace;
  2652. return bytes_copied;
  2653. }
  2654. ioa_dump->page_offset = 0;
  2655. ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
  2656. ioa_dump->next_page_index++;
  2657. } else
  2658. page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
  2659. rem_len = length - bytes_copied;
  2660. rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
  2661. cur_len = min(rem_len, rem_page_len);
  2662. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2663. if (ioa_cfg->sdt_state == ABORT_DUMP) {
  2664. rc = -EIO;
  2665. } else {
  2666. rc = ipr_get_ldump_data_section(ioa_cfg,
  2667. pci_address + bytes_copied,
  2668. &page[ioa_dump->page_offset / 4],
  2669. (cur_len / sizeof(u32)));
  2670. }
  2671. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2672. if (!rc) {
  2673. ioa_dump->page_offset += cur_len;
  2674. bytes_copied += cur_len;
  2675. } else {
  2676. ipr_trace;
  2677. break;
  2678. }
  2679. schedule();
  2680. }
  2681. return bytes_copied;
  2682. }
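/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * each pass of the copy above transfers at most the smaller of "bytes left
 * in the request" and "bytes left in the current page", so a long transfer
 * naturally splits on page boundaries and a fresh page is allocated only
 * when the previous one fills up.  The chunking arithmetic by itself:
 */
#if 0
static int next_chunk_sketch(int length, int bytes_copied, int page_offset)
{
	int rem_len = length - bytes_copied;
	int rem_page_len = PAGE_SIZE - page_offset;

	return rem_len < rem_page_len ? rem_len : rem_page_len;
}
#endif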
  2683. /**
  2684. * ipr_init_dump_entry_hdr - Initialize a dump entry header.
  2685. * @hdr: dump entry header struct
  2686. *
  2687. * Return value:
  2688. * nothing
  2689. **/
  2690. static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
  2691. {
  2692. hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
  2693. hdr->num_elems = 1;
  2694. hdr->offset = sizeof(*hdr);
  2695. hdr->status = IPR_DUMP_STATUS_SUCCESS;
  2696. }
  2697. /**
  2698. * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
  2699. * @ioa_cfg: ioa config struct
  2700. * @driver_dump: driver dump struct
  2701. *
  2702. * Return value:
  2703. * nothing
  2704. **/
  2705. static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
  2706. struct ipr_driver_dump *driver_dump)
  2707. {
  2708. struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
  2709. ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
  2710. driver_dump->ioa_type_entry.hdr.len =
  2711. sizeof(struct ipr_dump_ioa_type_entry) -
  2712. sizeof(struct ipr_dump_entry_header);
  2713. driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
  2714. driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
  2715. driver_dump->ioa_type_entry.type = ioa_cfg->type;
  2716. driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
  2717. (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
  2718. ucode_vpd->minor_release[1];
  2719. driver_dump->hdr.num_entries++;
  2720. }
  2721. /**
  2722. * ipr_dump_version_data - Fill in the driver version in the dump.
  2723. * @ioa_cfg: ioa config struct
  2724. * @driver_dump: driver dump struct
  2725. *
  2726. * Return value:
  2727. * nothing
  2728. **/
  2729. static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
  2730. struct ipr_driver_dump *driver_dump)
  2731. {
  2732. ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
  2733. driver_dump->version_entry.hdr.len =
  2734. sizeof(struct ipr_dump_version_entry) -
  2735. sizeof(struct ipr_dump_entry_header);
  2736. driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
  2737. driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
  2738. strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
  2739. driver_dump->hdr.num_entries++;
  2740. }
  2741. /**
  2742. * ipr_dump_trace_data - Fill in the IOA trace in the dump.
  2743. * @ioa_cfg: ioa config struct
  2744. * @driver_dump: driver dump struct
  2745. *
  2746. * Return value:
  2747. * nothing
  2748. **/
  2749. static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
  2750. struct ipr_driver_dump *driver_dump)
  2751. {
  2752. ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
  2753. driver_dump->trace_entry.hdr.len =
  2754. sizeof(struct ipr_dump_trace_entry) -
  2755. sizeof(struct ipr_dump_entry_header);
  2756. driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
  2757. driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
  2758. memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
  2759. driver_dump->hdr.num_entries++;
  2760. }
  2761. /**
  2762. * ipr_dump_location_data - Fill in the IOA location in the dump.
  2763. * @ioa_cfg: ioa config struct
  2764. * @driver_dump: driver dump struct
  2765. *
  2766. * Return value:
  2767. * nothing
  2768. **/
  2769. static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
  2770. struct ipr_driver_dump *driver_dump)
  2771. {
  2772. ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
  2773. driver_dump->location_entry.hdr.len =
  2774. sizeof(struct ipr_dump_location_entry) -
  2775. sizeof(struct ipr_dump_entry_header);
  2776. driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
  2777. driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
  2778. strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
  2779. driver_dump->hdr.num_entries++;
  2780. }
  2781. /**
  2782. * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
  2783. * @ioa_cfg: ioa config struct
  2784. * @dump: dump struct
  2785. *
  2786. * Return value:
  2787. * nothing
  2788. **/
  2789. static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
  2790. {
  2791. unsigned long start_addr, sdt_word;
  2792. unsigned long lock_flags = 0;
  2793. struct ipr_driver_dump *driver_dump = &dump->driver_dump;
  2794. struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
  2795. u32 num_entries, max_num_entries, start_off, end_off;
  2796. u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
  2797. struct ipr_sdt *sdt;
  2798. int valid = 1;
  2799. int i;
  2800. ENTER;
  2801. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2802. if (ioa_cfg->sdt_state != READ_DUMP) {
  2803. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2804. return;
  2805. }
  2806. if (ioa_cfg->sis64) {
  2807. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2808. ssleep(IPR_DUMP_DELAY_SECONDS);
  2809. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2810. }
  2811. start_addr = readl(ioa_cfg->ioa_mailbox);
  2812. if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
  2813. dev_err(&ioa_cfg->pdev->dev,
  2814. "Invalid dump table format: %lx\n", start_addr);
  2815. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2816. return;
  2817. }
  2818. dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
  2819. driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
  2820. /* Initialize the overall dump header */
  2821. driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
  2822. driver_dump->hdr.num_entries = 1;
  2823. driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
  2824. driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
  2825. driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
  2826. driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
  2827. ipr_dump_version_data(ioa_cfg, driver_dump);
  2828. ipr_dump_location_data(ioa_cfg, driver_dump);
  2829. ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
  2830. ipr_dump_trace_data(ioa_cfg, driver_dump);
  2831. /* Update dump_header */
  2832. driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
  2833. /* IOA Dump entry */
  2834. ipr_init_dump_entry_hdr(&ioa_dump->hdr);
  2835. ioa_dump->hdr.len = 0;
  2836. ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
  2837. ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2838. /* First entries in the SDT are actually a list of dump addresses and
2839. lengths used to gather the real dump data. sdt points to the
2840. IOA-generated dump table. Dump data will be extracted based
2841. on the entries in this table */
  2842. sdt = &ioa_dump->sdt;
  2843. if (ioa_cfg->sis64) {
  2844. max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
  2845. max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
  2846. } else {
  2847. max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
  2848. max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
  2849. }
  2850. bytes_to_copy = offsetof(struct ipr_sdt, entry) +
  2851. (max_num_entries * sizeof(struct ipr_sdt_entry));
  2852. rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
  2853. bytes_to_copy / sizeof(__be32));
  2854. /* Smart Dump table is ready to use and the first entry is valid */
  2855. if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
  2856. (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
  2857. dev_err(&ioa_cfg->pdev->dev,
  2858. "Dump of IOA failed. Dump table not valid: %d, %X.\n",
  2859. rc, be32_to_cpu(sdt->hdr.state));
  2860. driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
  2861. ioa_cfg->sdt_state = DUMP_OBTAINED;
  2862. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2863. return;
  2864. }
  2865. num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
  2866. if (num_entries > max_num_entries)
  2867. num_entries = max_num_entries;
  2868. /* Update dump length to the actual data to be copied */
  2869. dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
  2870. if (ioa_cfg->sis64)
  2871. dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
  2872. else
  2873. dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
  2874. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2875. for (i = 0; i < num_entries; i++) {
  2876. if (ioa_dump->hdr.len > max_dump_size) {
  2877. driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
  2878. break;
  2879. }
  2880. if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
  2881. sdt_word = be32_to_cpu(sdt->entry[i].start_token);
  2882. if (ioa_cfg->sis64)
  2883. bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
  2884. else {
  2885. start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
  2886. end_off = be32_to_cpu(sdt->entry[i].end_token);
  2887. if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
  2888. bytes_to_copy = end_off - start_off;
  2889. else
  2890. valid = 0;
  2891. }
  2892. if (valid) {
  2893. if (bytes_to_copy > max_dump_size) {
  2894. sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
  2895. continue;
  2896. }
  2897. /* Copy data from adapter to driver buffers */
  2898. bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
  2899. bytes_to_copy);
  2900. ioa_dump->hdr.len += bytes_copied;
  2901. if (bytes_copied != bytes_to_copy) {
  2902. driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
  2903. break;
  2904. }
  2905. }
  2906. }
  2907. }
  2908. dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
  2909. /* Update dump_header */
  2910. driver_dump->hdr.len += ioa_dump->hdr.len;
  2911. wmb();
  2912. ioa_cfg->sdt_state = DUMP_OBTAINED;
  2913. LEAVE;
  2914. }
  2915. #else
  2916. #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
  2917. #endif
  2918. /**
  2919. * ipr_release_dump - Free adapter dump memory
  2920. * @kref: kref struct
  2921. *
  2922. * Return value:
  2923. * nothing
  2924. **/
  2925. static void ipr_release_dump(struct kref *kref)
  2926. {
  2927. struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
  2928. struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
  2929. unsigned long lock_flags = 0;
  2930. int i;
  2931. ENTER;
  2932. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2933. ioa_cfg->dump = NULL;
  2934. ioa_cfg->sdt_state = INACTIVE;
  2935. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2936. for (i = 0; i < dump->ioa_dump.next_page_index; i++)
  2937. free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
  2938. vfree(dump->ioa_dump.ioa_data);
  2939. kfree(dump);
  2940. LEAVE;
  2941. }
  2942. static void ipr_add_remove_thread(struct work_struct *work)
  2943. {
  2944. unsigned long lock_flags;
  2945. struct ipr_resource_entry *res;
  2946. struct scsi_device *sdev;
  2947. struct ipr_ioa_cfg *ioa_cfg =
  2948. container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
  2949. u8 bus, target, lun;
  2950. int did_work;
  2951. ENTER;
  2952. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2953. restart:
  2954. do {
  2955. did_work = 0;
  2956. if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
  2957. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2958. return;
  2959. }
  2960. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  2961. if (res->del_from_ml && res->sdev) {
  2962. did_work = 1;
  2963. sdev = res->sdev;
  2964. if (!scsi_device_get(sdev)) {
  2965. if (!res->add_to_ml)
  2966. list_move_tail(&res->queue, &ioa_cfg->free_res_q);
  2967. else
  2968. res->del_from_ml = 0;
  2969. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2970. scsi_remove_device(sdev);
  2971. scsi_device_put(sdev);
  2972. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2973. }
  2974. break;
  2975. }
  2976. }
  2977. } while (did_work);
  2978. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  2979. if (res->add_to_ml) {
  2980. bus = res->bus;
  2981. target = res->target;
  2982. lun = res->lun;
  2983. res->add_to_ml = 0;
  2984. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2985. scsi_add_device(ioa_cfg->host, bus, target, lun);
  2986. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  2987. goto restart;
  2988. }
  2989. }
  2990. ioa_cfg->scan_done = 1;
  2991. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  2992. kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
  2993. LEAVE;
  2994. }
  2995. /**
  2996. * ipr_worker_thread - Worker thread
  2997. * @work: ioa config struct
  2998. *
  2999. * Called at task level from a work thread. This function takes care
3000. of adding and removing devices from the mid-layer as configuration
  3001. * changes are detected by the adapter.
  3002. *
  3003. * Return value:
  3004. * nothing
  3005. **/
  3006. static void ipr_worker_thread(struct work_struct *work)
  3007. {
  3008. unsigned long lock_flags;
  3009. struct ipr_dump *dump;
  3010. struct ipr_ioa_cfg *ioa_cfg =
  3011. container_of(work, struct ipr_ioa_cfg, work_q);
  3012. ENTER;
  3013. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3014. if (ioa_cfg->sdt_state == READ_DUMP) {
  3015. dump = ioa_cfg->dump;
  3016. if (!dump) {
  3017. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3018. return;
  3019. }
  3020. kref_get(&dump->kref);
  3021. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3022. ipr_get_ioa_dump(ioa_cfg, dump);
  3023. kref_put(&dump->kref, ipr_release_dump);
  3024. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3025. if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
  3026. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  3027. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3028. return;
  3029. }
  3030. if (ioa_cfg->scsi_unblock) {
  3031. ioa_cfg->scsi_unblock = 0;
  3032. ioa_cfg->scsi_blocked = 0;
  3033. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3034. scsi_unblock_requests(ioa_cfg->host);
  3035. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3036. if (ioa_cfg->scsi_blocked)
  3037. scsi_block_requests(ioa_cfg->host);
  3038. }
  3039. if (!ioa_cfg->scan_enabled) {
  3040. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3041. return;
  3042. }
  3043. schedule_work(&ioa_cfg->scsi_add_work_q);
  3044. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3045. LEAVE;
  3046. }
  3047. #ifdef CONFIG_SCSI_IPR_TRACE
  3048. /**
  3049. * ipr_read_trace - Dump the adapter trace
  3050. * @filp: open sysfs file
  3051. * @kobj: kobject struct
  3052. * @bin_attr: bin_attribute struct
  3053. * @buf: buffer
  3054. * @off: offset
  3055. * @count: buffer size
  3056. *
  3057. * Return value:
  3058. * number of bytes printed to buffer
  3059. **/
  3060. static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
  3061. struct bin_attribute *bin_attr,
  3062. char *buf, loff_t off, size_t count)
  3063. {
  3064. struct device *dev = kobj_to_dev(kobj);
  3065. struct Scsi_Host *shost = class_to_shost(dev);
  3066. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3067. unsigned long lock_flags = 0;
  3068. ssize_t ret;
  3069. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3070. ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
  3071. IPR_TRACE_SIZE);
  3072. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3073. return ret;
  3074. }
  3075. static struct bin_attribute ipr_trace_attr = {
  3076. .attr = {
  3077. .name = "trace",
  3078. .mode = S_IRUGO,
  3079. },
  3080. .size = 0,
  3081. .read = ipr_read_trace,
  3082. };
  3083. #endif
  3084. /**
  3085. * ipr_show_fw_version - Show the firmware version
  3086. * @dev: class device struct
  3087. * @attr: device attribute (unused)
  3088. * @buf: buffer
  3089. *
  3090. * Return value:
  3091. * number of bytes printed to buffer
  3092. **/
  3093. static ssize_t ipr_show_fw_version(struct device *dev,
  3094. struct device_attribute *attr, char *buf)
  3095. {
  3096. struct Scsi_Host *shost = class_to_shost(dev);
  3097. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3098. struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
  3099. unsigned long lock_flags = 0;
  3100. int len;
  3101. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3102. len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
  3103. ucode_vpd->major_release, ucode_vpd->card_type,
  3104. ucode_vpd->minor_release[0],
  3105. ucode_vpd->minor_release[1]);
  3106. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3107. return len;
  3108. }
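/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * the string shown here is built from the same four VPD bytes that
 * ipr_dump_ioa_type_data() packs into a single 32-bit fw_version word for
 * the dump header, just rendered as hex:
 */
#if 0
static u32 pack_fw_version_sketch(u8 major, u8 card_type, u8 minor0, u8 minor1)
{
	return ((u32)major << 24) | ((u32)card_type << 16) |
	       ((u32)minor0 << 8) | minor1;
}
#endif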
  3109. static struct device_attribute ipr_fw_version_attr = {
  3110. .attr = {
  3111. .name = "fw_version",
  3112. .mode = S_IRUGO,
  3113. },
  3114. .show = ipr_show_fw_version,
  3115. };
  3116. /**
  3117. * ipr_show_log_level - Show the adapter's error logging level
  3118. * @dev: class device struct
  3119. * @attr: device attribute (unused)
  3120. * @buf: buffer
  3121. *
  3122. * Return value:
  3123. * number of bytes printed to buffer
  3124. **/
  3125. static ssize_t ipr_show_log_level(struct device *dev,
  3126. struct device_attribute *attr, char *buf)
  3127. {
  3128. struct Scsi_Host *shost = class_to_shost(dev);
  3129. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3130. unsigned long lock_flags = 0;
  3131. int len;
  3132. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3133. len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
  3134. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3135. return len;
  3136. }
  3137. /**
  3138. * ipr_store_log_level - Change the adapter's error logging level
  3139. * @dev: class device struct
  3140. * @attr: device attribute (unused)
  3141. * @buf: buffer
  3142. * @count: buffer size
  3143. *
  3144. * Return value:
3145. * number of bytes consumed from the buffer
  3146. **/
  3147. static ssize_t ipr_store_log_level(struct device *dev,
  3148. struct device_attribute *attr,
  3149. const char *buf, size_t count)
  3150. {
  3151. struct Scsi_Host *shost = class_to_shost(dev);
  3152. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3153. unsigned long lock_flags = 0;
  3154. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3155. ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
  3156. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3157. return strlen(buf);
  3158. }
  3159. static struct device_attribute ipr_log_level_attr = {
  3160. .attr = {
  3161. .name = "log_level",
  3162. .mode = S_IRUGO | S_IWUSR,
  3163. },
  3164. .show = ipr_show_log_level,
  3165. .store = ipr_store_log_level
  3166. };
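/*
 * Illustrative sketch (not part of the driver; kept out of the build):
 * these device attributes surface through the SCSI host class device,
 * typically as /sys/class/scsi_host/host<N>/log_level (path assumed), so
 * changing the logging level from user space is an ordinary file write:
 */
#if 0
#include <stdio.h>

static int set_log_level_sketch(const char *path, unsigned int level)
{
	FILE *f = fopen(path, "w");	/* e.g. ".../host0/log_level" */

	if (!f)
		return -1;
	fprintf(f, "%u\n", level);
	return fclose(f);
}
#endif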
  3167. /**
  3168. * ipr_store_diagnostics - IOA Diagnostics interface
  3169. * @dev: device struct
  3170. * @attr: device attribute (unused)
  3171. * @buf: buffer
  3172. * @count: buffer size
  3173. *
  3174. * This function will reset the adapter and wait a reasonable
  3175. * amount of time for any errors that the adapter might log.
  3176. *
  3177. * Return value:
  3178. * count on success / other on failure
  3179. **/
  3180. static ssize_t ipr_store_diagnostics(struct device *dev,
  3181. struct device_attribute *attr,
  3182. const char *buf, size_t count)
  3183. {
  3184. struct Scsi_Host *shost = class_to_shost(dev);
  3185. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3186. unsigned long lock_flags = 0;
  3187. int rc = count;
  3188. if (!capable(CAP_SYS_ADMIN))
  3189. return -EACCES;
  3190. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3191. while (ioa_cfg->in_reset_reload) {
  3192. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3193. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  3194. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3195. }
  3196. ioa_cfg->errors_logged = 0;
  3197. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
  3198. if (ioa_cfg->in_reset_reload) {
  3199. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3200. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  3201. /* Wait for a second for any errors to be logged */
  3202. msleep(1000);
  3203. } else {
  3204. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3205. return -EIO;
  3206. }
  3207. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3208. if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
  3209. rc = -EIO;
  3210. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3211. return rc;
  3212. }
  3213. static struct device_attribute ipr_diagnostics_attr = {
  3214. .attr = {
  3215. .name = "run_diagnostics",
  3216. .mode = S_IWUSR,
  3217. },
  3218. .store = ipr_store_diagnostics
  3219. };
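/*
 * Example: run_diagnostics is write-only and requires CAP_SYS_ADMIN. Any
 * write triggers a normal-shutdown adapter reset and returns -EIO if the
 * adapter logged errors during the reset; assuming host 0:
 *
 *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */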
  3220. /**
  3221. * ipr_show_adapter_state - Show the adapter's state
  3222. * @dev: device struct
  3223. * @attr: device attribute (unused)
  3224. * @buf: buffer
  3225. *
  3226. * Return value:
  3227. * number of bytes printed to buffer
  3228. **/
  3229. static ssize_t ipr_show_adapter_state(struct device *dev,
  3230. struct device_attribute *attr, char *buf)
  3231. {
  3232. struct Scsi_Host *shost = class_to_shost(dev);
  3233. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3234. unsigned long lock_flags = 0;
  3235. int len;
  3236. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3237. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
  3238. len = snprintf(buf, PAGE_SIZE, "offline\n");
  3239. else
  3240. len = snprintf(buf, PAGE_SIZE, "online\n");
  3241. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3242. return len;
  3243. }
  3244. /**
  3245. * ipr_store_adapter_state - Change adapter state
  3246. * @dev: device struct
  3247. * @attr: device attribute (unused)
  3248. * @buf: buffer
  3249. * @count: buffer size
  3250. *
  3251. * This function will change the adapter's state.
  3252. *
  3253. * Return value:
  3254. * count on success / other on failure
  3255. **/
  3256. static ssize_t ipr_store_adapter_state(struct device *dev,
  3257. struct device_attribute *attr,
  3258. const char *buf, size_t count)
  3259. {
  3260. struct Scsi_Host *shost = class_to_shost(dev);
  3261. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3262. unsigned long lock_flags;
  3263. int result = count, i;
  3264. if (!capable(CAP_SYS_ADMIN))
  3265. return -EACCES;
  3266. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3267. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
  3268. !strncmp(buf, "online", 6)) {
  3269. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  3270. spin_lock(&ioa_cfg->hrrq[i]._lock);
  3271. ioa_cfg->hrrq[i].ioa_is_dead = 0;
  3272. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  3273. }
  3274. wmb();
  3275. ioa_cfg->reset_retries = 0;
  3276. ioa_cfg->in_ioa_bringdown = 0;
  3277. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  3278. }
  3279. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3280. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  3281. return result;
  3282. }
  3283. static struct device_attribute ipr_ioa_state_attr = {
  3284. .attr = {
  3285. .name = "online_state",
  3286. .mode = S_IRUGO | S_IWUSR,
  3287. },
  3288. .show = ipr_show_adapter_state,
  3289. .store = ipr_store_adapter_state
  3290. };
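/*
 * Example: online_state reports "online" or "offline"; writing "online" to
 * a dead adapter clears the per-hrrq ioa_is_dead flags and starts a fresh
 * adapter reset. Assuming host 0:
 *
 *   cat /sys/class/scsi_host/host0/online_state
 *   echo online > /sys/class/scsi_host/host0/online_state
 */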
  3291. /**
  3292. * ipr_store_reset_adapter - Reset the adapter
  3293. * @dev: device struct
  3294. * @attr: device attribute (unused)
  3295. * @buf: buffer
  3296. * @count: buffer size
  3297. *
  3298. * This function will reset the adapter.
  3299. *
  3300. * Return value:
  3301. * count on success / other on failure
  3302. **/
  3303. static ssize_t ipr_store_reset_adapter(struct device *dev,
  3304. struct device_attribute *attr,
  3305. const char *buf, size_t count)
  3306. {
  3307. struct Scsi_Host *shost = class_to_shost(dev);
  3308. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3309. unsigned long lock_flags;
  3310. int result = count;
  3311. if (!capable(CAP_SYS_ADMIN))
  3312. return -EACCES;
  3313. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3314. if (!ioa_cfg->in_reset_reload)
  3315. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
  3316. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3317. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  3318. return result;
  3319. }
  3320. static struct device_attribute ipr_ioa_reset_attr = {
  3321. .attr = {
  3322. .name = "reset_host",
  3323. .mode = S_IWUSR,
  3324. },
  3325. .store = ipr_store_reset_adapter
  3326. };
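/*
 * Example: reset_host is write-only and the written value is not parsed,
 * so any write from a CAP_SYS_ADMIN caller initiates a normal-shutdown
 * reset and blocks until reset/reload completes. Assuming host 0:
 *
 *   echo 1 > /sys/class/scsi_host/host0/reset_host
 */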
  3327. static int ipr_iopoll(struct irq_poll *iop, int budget);
  3328. /**
  3329. * ipr_show_iopoll_weight - Show ipr polling mode
  3330. * @dev: class device struct
  3331. * @attr: device attribute (unused)
  3332. * @buf: buffer
  3333. *
  3334. * Return value:
  3335. * number of bytes printed to buffer
  3336. **/
  3337. static ssize_t ipr_show_iopoll_weight(struct device *dev,
  3338. struct device_attribute *attr, char *buf)
  3339. {
  3340. struct Scsi_Host *shost = class_to_shost(dev);
  3341. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3342. unsigned long lock_flags = 0;
  3343. int len;
  3344. spin_lock_irqsave(shost->host_lock, lock_flags);
  3345. len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
  3346. spin_unlock_irqrestore(shost->host_lock, lock_flags);
  3347. return len;
  3348. }
  3349. /**
  3350. * ipr_store_iopoll_weight - Change the adapter's polling mode
  3351. * @dev: class device struct
  3352. * @attr: device attribute (unused)
  3353. * @buf: buffer
  3354. * @count: buffer size
  3355. *
  3356. * Return value:
3357. * number of bytes consumed on success / -EINVAL on failure
  3358. **/
  3359. static ssize_t ipr_store_iopoll_weight(struct device *dev,
  3360. struct device_attribute *attr,
  3361. const char *buf, size_t count)
  3362. {
  3363. struct Scsi_Host *shost = class_to_shost(dev);
  3364. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3365. unsigned long user_iopoll_weight;
  3366. unsigned long lock_flags = 0;
  3367. int i;
  3368. if (!ioa_cfg->sis64) {
  3369. dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
  3370. return -EINVAL;
  3371. }
  3372. if (kstrtoul(buf, 10, &user_iopoll_weight))
  3373. return -EINVAL;
  3374. if (user_iopoll_weight > 256) {
3375. dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be 256 or less\n");
  3376. return -EINVAL;
  3377. }
  3378. if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3379. dev_info(&ioa_cfg->pdev->dev, "irq_poll weight is already set to this value\n");
  3380. return strlen(buf);
  3381. }
  3382. if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
  3383. for (i = 1; i < ioa_cfg->hrrq_num; i++)
  3384. irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
  3385. }
  3386. spin_lock_irqsave(shost->host_lock, lock_flags);
  3387. ioa_cfg->iopoll_weight = user_iopoll_weight;
  3388. if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
  3389. for (i = 1; i < ioa_cfg->hrrq_num; i++) {
  3390. irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
  3391. ioa_cfg->iopoll_weight, ipr_iopoll);
  3392. }
  3393. }
  3394. spin_unlock_irqrestore(shost->host_lock, lock_flags);
  3395. return strlen(buf);
  3396. }
  3397. static struct device_attribute ipr_iopoll_weight_attr = {
  3398. .attr = {
  3399. .name = "iopoll_weight",
  3400. .mode = S_IRUGO | S_IWUSR,
  3401. },
  3402. .show = ipr_show_iopoll_weight,
  3403. .store = ipr_store_iopoll_weight
  3404. };
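/*
 * Example: iopoll_weight only applies to sis64 adapters with more than one
 * MSI-X vector; writing 0 disables irq_poll and values up to 256 set the
 * polling budget. Assuming host 0:
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */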
  3405. /**
  3406. * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
  3407. * @buf_len: buffer length
  3408. *
  3409. * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
  3410. * list to use for microcode download
  3411. *
  3412. * Return value:
  3413. * pointer to sglist / NULL on failure
  3414. **/
  3415. static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
  3416. {
  3417. int sg_size, order;
  3418. struct ipr_sglist *sglist;
  3419. /* Get the minimum size per scatter/gather element */
  3420. sg_size = buf_len / (IPR_MAX_SGLIST - 1);
  3421. /* Get the actual size per element */
  3422. order = get_order(sg_size);
  3423. /* Allocate a scatter/gather list for the DMA */
  3424. sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
  3425. if (sglist == NULL) {
  3426. ipr_trace;
  3427. return NULL;
  3428. }
  3429. sglist->order = order;
  3430. sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
  3431. &sglist->num_sg);
  3432. if (!sglist->scatterlist) {
  3433. kfree(sglist);
  3434. return NULL;
  3435. }
  3436. return sglist;
  3437. }
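/*
 * Sizing sketch (illustrative values): with 4 KiB pages and an
 * IPR_MAX_SGLIST of 64, a 4 MiB microcode image gives
 * sg_size = 4 MiB / 63 = ~65 KiB, so get_order() rounds each element up to
 * order 5 (128 KiB) and sgl_alloc_order() builds a 32-entry
 * scatter/gather list.
 */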
  3438. /**
  3439. * ipr_free_ucode_buffer - Frees a microcode download buffer
  3440. * @sglist: scatter/gather list pointer
  3441. *
  3442. * Free a DMA'able ucode download buffer previously allocated with
  3443. * ipr_alloc_ucode_buffer
  3444. *
  3445. * Return value:
  3446. * nothing
  3447. **/
  3448. static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
  3449. {
  3450. sgl_free_order(sglist->scatterlist, sglist->order);
  3451. kfree(sglist);
  3452. }
  3453. /**
  3454. * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
  3455. * @sglist: scatter/gather list pointer
  3456. * @buffer: buffer pointer
  3457. * @len: buffer length
  3458. *
  3459. * Copy a microcode image from a user buffer into a buffer allocated by
  3460. * ipr_alloc_ucode_buffer
  3461. *
  3462. * Return value:
  3463. * 0 on success / other on failure
  3464. **/
  3465. static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
  3466. u8 *buffer, u32 len)
  3467. {
  3468. int bsize_elem, i, result = 0;
  3469. struct scatterlist *sg;
  3470. void *kaddr;
  3471. /* Determine the actual number of bytes per element */
  3472. bsize_elem = PAGE_SIZE * (1 << sglist->order);
  3473. sg = sglist->scatterlist;
  3474. for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
  3475. buffer += bsize_elem) {
  3476. struct page *page = sg_page(sg);
  3477. kaddr = kmap(page);
  3478. memcpy(kaddr, buffer, bsize_elem);
  3479. kunmap(page);
  3480. sg->length = bsize_elem;
  3481. if (result != 0) {
  3482. ipr_trace;
  3483. return result;
  3484. }
  3485. }
  3486. if (len % bsize_elem) {
  3487. struct page *page = sg_page(sg);
  3488. kaddr = kmap(page);
  3489. memcpy(kaddr, buffer, len % bsize_elem);
  3490. kunmap(page);
  3491. sg->length = len % bsize_elem;
  3492. }
  3493. sglist->buffer_len = len;
  3494. return result;
  3495. }
  3496. /**
  3497. * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
  3498. * @ipr_cmd: ipr command struct
  3499. * @sglist: scatter/gather list
  3500. *
  3501. * Builds a microcode download IOA data list (IOADL).
  3502. *
  3503. **/
  3504. static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
  3505. struct ipr_sglist *sglist)
  3506. {
  3507. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  3508. struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
  3509. struct scatterlist *scatterlist = sglist->scatterlist;
  3510. struct scatterlist *sg;
  3511. int i;
  3512. ipr_cmd->dma_use_sg = sglist->num_dma_sg;
  3513. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  3514. ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
  3515. ioarcb->ioadl_len =
  3516. cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
  3517. for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
  3518. ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
  3519. ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
  3520. ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
  3521. }
  3522. ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
  3523. }
  3524. /**
  3525. * ipr_build_ucode_ioadl - Build a microcode download IOADL
  3526. * @ipr_cmd: ipr command struct
  3527. * @sglist: scatter/gather list
  3528. *
  3529. * Builds a microcode download IOA data list (IOADL).
  3530. *
  3531. **/
  3532. static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
  3533. struct ipr_sglist *sglist)
  3534. {
  3535. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  3536. struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
  3537. struct scatterlist *scatterlist = sglist->scatterlist;
  3538. struct scatterlist *sg;
  3539. int i;
  3540. ipr_cmd->dma_use_sg = sglist->num_dma_sg;
  3541. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  3542. ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
  3543. ioarcb->ioadl_len =
  3544. cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
  3545. for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
  3546. ioadl[i].flags_and_data_len =
  3547. cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
  3548. ioadl[i].address =
  3549. cpu_to_be32(sg_dma_address(sg));
  3550. }
  3551. ioadl[i-1].flags_and_data_len |=
  3552. cpu_to_be32(IPR_IOADL_FLAGS_LAST);
  3553. }
  3554. /**
  3555. * ipr_update_ioa_ucode - Update IOA's microcode
  3556. * @ioa_cfg: ioa config struct
  3557. * @sglist: scatter/gather list
  3558. *
  3559. * Initiate an adapter reset to update the IOA's microcode
  3560. *
  3561. * Return value:
  3562. * 0 on success / -EIO on failure
  3563. **/
  3564. static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
  3565. struct ipr_sglist *sglist)
  3566. {
  3567. unsigned long lock_flags;
  3568. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3569. while (ioa_cfg->in_reset_reload) {
  3570. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3571. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  3572. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3573. }
  3574. if (ioa_cfg->ucode_sglist) {
  3575. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3576. dev_err(&ioa_cfg->pdev->dev,
  3577. "Microcode download already in progress\n");
  3578. return -EIO;
  3579. }
  3580. sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
  3581. sglist->scatterlist, sglist->num_sg,
  3582. DMA_TO_DEVICE);
  3583. if (!sglist->num_dma_sg) {
  3584. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3585. dev_err(&ioa_cfg->pdev->dev,
  3586. "Failed to map microcode download buffer!\n");
  3587. return -EIO;
  3588. }
  3589. ioa_cfg->ucode_sglist = sglist;
  3590. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
  3591. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3592. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  3593. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3594. ioa_cfg->ucode_sglist = NULL;
  3595. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3596. return 0;
  3597. }
  3598. /**
  3599. * ipr_store_update_fw - Update the firmware on the adapter
  3600. * @dev: device struct
  3601. * @attr: device attribute (unused)
  3602. * @buf: buffer
  3603. * @count: buffer size
  3604. *
  3605. * This function will update the firmware on the adapter.
  3606. *
  3607. * Return value:
  3608. * count on success / other on failure
  3609. **/
  3610. static ssize_t ipr_store_update_fw(struct device *dev,
  3611. struct device_attribute *attr,
  3612. const char *buf, size_t count)
  3613. {
  3614. struct Scsi_Host *shost = class_to_shost(dev);
  3615. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3616. struct ipr_ucode_image_header *image_hdr;
  3617. const struct firmware *fw_entry;
  3618. struct ipr_sglist *sglist;
  3619. char fname[100];
  3620. char *src;
  3621. char *endline;
  3622. int result, dnld_size;
  3623. if (!capable(CAP_SYS_ADMIN))
  3624. return -EACCES;
  3625. snprintf(fname, sizeof(fname), "%s", buf);
  3626. endline = strchr(fname, '\n');
  3627. if (endline)
  3628. *endline = '\0';
  3629. if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
  3630. dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
  3631. return -EIO;
  3632. }
  3633. image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
  3634. src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
  3635. dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
  3636. sglist = ipr_alloc_ucode_buffer(dnld_size);
  3637. if (!sglist) {
  3638. dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
  3639. release_firmware(fw_entry);
  3640. return -ENOMEM;
  3641. }
  3642. result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
  3643. if (result) {
  3644. dev_err(&ioa_cfg->pdev->dev,
  3645. "Microcode buffer copy to DMA buffer failed\n");
  3646. goto out;
  3647. }
  3648. ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
  3649. result = ipr_update_ioa_ucode(ioa_cfg, sglist);
  3650. if (!result)
  3651. result = count;
  3652. out:
  3653. ipr_free_ucode_buffer(sglist);
  3654. release_firmware(fw_entry);
  3655. return result;
  3656. }
  3657. static struct device_attribute ipr_update_fw_attr = {
  3658. .attr = {
  3659. .name = "update_fw",
  3660. .mode = S_IWUSR,
  3661. },
  3662. .store = ipr_store_update_fw
  3663. };
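/*
 * Example: the value written to update_fw is treated as a firmware file
 * name (a trailing newline is stripped) and handed to request_firmware(),
 * which searches the usual firmware paths such as /lib/firmware. Assuming
 * host 0:
 *
 *   echo <ucode image file> > /sys/class/scsi_host/host0/update_fw
 */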
  3664. /**
  3665. * ipr_show_fw_type - Show the adapter's firmware type.
  3666. * @dev: class device struct
  3667. * @attr: device attribute (unused)
  3668. * @buf: buffer
  3669. *
  3670. * Return value:
  3671. * number of bytes printed to buffer
  3672. **/
  3673. static ssize_t ipr_show_fw_type(struct device *dev,
  3674. struct device_attribute *attr, char *buf)
  3675. {
  3676. struct Scsi_Host *shost = class_to_shost(dev);
  3677. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3678. unsigned long lock_flags = 0;
  3679. int len;
  3680. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3681. len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
  3682. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3683. return len;
  3684. }
  3685. static struct device_attribute ipr_ioa_fw_type_attr = {
  3686. .attr = {
  3687. .name = "fw_type",
  3688. .mode = S_IRUGO,
  3689. },
  3690. .show = ipr_show_fw_type
  3691. };
  3692. static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
  3693. struct bin_attribute *bin_attr, char *buf,
  3694. loff_t off, size_t count)
  3695. {
  3696. struct device *cdev = kobj_to_dev(kobj);
  3697. struct Scsi_Host *shost = class_to_shost(cdev);
  3698. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3699. struct ipr_hostrcb *hostrcb;
  3700. unsigned long lock_flags = 0;
  3701. int ret;
  3702. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3703. hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
  3704. struct ipr_hostrcb, queue);
  3705. if (!hostrcb) {
  3706. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3707. return 0;
  3708. }
  3709. ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
  3710. sizeof(hostrcb->hcam));
  3711. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3712. return ret;
  3713. }
  3714. static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
  3715. struct bin_attribute *bin_attr, char *buf,
  3716. loff_t off, size_t count)
  3717. {
  3718. struct device *cdev = kobj_to_dev(kobj);
  3719. struct Scsi_Host *shost = class_to_shost(cdev);
  3720. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3721. struct ipr_hostrcb *hostrcb;
  3722. unsigned long lock_flags = 0;
  3723. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3724. hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
  3725. struct ipr_hostrcb, queue);
  3726. if (!hostrcb) {
  3727. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3728. return count;
  3729. }
  3730. /* Reclaim hostrcb before exit */
  3731. list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
  3732. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3733. return count;
  3734. }
  3735. static struct bin_attribute ipr_ioa_async_err_log = {
  3736. .attr = {
  3737. .name = "async_err_log",
  3738. .mode = S_IRUGO | S_IWUSR,
  3739. },
  3740. .size = 0,
  3741. .read = ipr_read_async_err_log,
  3742. .write = ipr_next_async_err_log
  3743. };
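/*
 * Example: reading async_err_log returns the oldest HCAM record on the
 * hostrcb_report_q (nothing if the queue is empty); a write moves that
 * record back to the free queue so the next read returns the following
 * one. Assuming host 0:
 *
 *   cat /sys/class/scsi_host/host0/async_err_log > hcam.bin
 *   echo 1 > /sys/class/scsi_host/host0/async_err_log
 */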
  3744. static struct attribute *ipr_ioa_attrs[] = {
  3745. &ipr_fw_version_attr.attr,
  3746. &ipr_log_level_attr.attr,
  3747. &ipr_diagnostics_attr.attr,
  3748. &ipr_ioa_state_attr.attr,
  3749. &ipr_ioa_reset_attr.attr,
  3750. &ipr_update_fw_attr.attr,
  3751. &ipr_ioa_fw_type_attr.attr,
  3752. &ipr_iopoll_weight_attr.attr,
  3753. NULL,
  3754. };
  3755. ATTRIBUTE_GROUPS(ipr_ioa);
  3756. #ifdef CONFIG_SCSI_IPR_DUMP
  3757. /**
  3758. * ipr_read_dump - Dump the adapter
  3759. * @filp: open sysfs file
  3760. * @kobj: kobject struct
  3761. * @bin_attr: bin_attribute struct
  3762. * @buf: buffer
  3763. * @off: offset
  3764. * @count: buffer size
  3765. *
  3766. * Return value:
  3767. * number of bytes printed to buffer
  3768. **/
  3769. static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
  3770. struct bin_attribute *bin_attr,
  3771. char *buf, loff_t off, size_t count)
  3772. {
  3773. struct device *cdev = kobj_to_dev(kobj);
  3774. struct Scsi_Host *shost = class_to_shost(cdev);
  3775. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3776. struct ipr_dump *dump;
  3777. unsigned long lock_flags = 0;
  3778. char *src;
  3779. int len, sdt_end;
  3780. size_t rc = count;
  3781. if (!capable(CAP_SYS_ADMIN))
  3782. return -EACCES;
  3783. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3784. dump = ioa_cfg->dump;
  3785. if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
  3786. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3787. return 0;
  3788. }
  3789. kref_get(&dump->kref);
  3790. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3791. if (off > dump->driver_dump.hdr.len) {
  3792. kref_put(&dump->kref, ipr_release_dump);
  3793. return 0;
  3794. }
  3795. if (off + count > dump->driver_dump.hdr.len) {
  3796. count = dump->driver_dump.hdr.len - off;
  3797. rc = count;
  3798. }
  3799. if (count && off < sizeof(dump->driver_dump)) {
  3800. if (off + count > sizeof(dump->driver_dump))
  3801. len = sizeof(dump->driver_dump) - off;
  3802. else
  3803. len = count;
  3804. src = (u8 *)&dump->driver_dump + off;
  3805. memcpy(buf, src, len);
  3806. buf += len;
  3807. off += len;
  3808. count -= len;
  3809. }
  3810. off -= sizeof(dump->driver_dump);
  3811. if (ioa_cfg->sis64)
  3812. sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
  3813. (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
  3814. sizeof(struct ipr_sdt_entry));
  3815. else
  3816. sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
  3817. (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
  3818. if (count && off < sdt_end) {
  3819. if (off + count > sdt_end)
  3820. len = sdt_end - off;
  3821. else
  3822. len = count;
  3823. src = (u8 *)&dump->ioa_dump + off;
  3824. memcpy(buf, src, len);
  3825. buf += len;
  3826. off += len;
  3827. count -= len;
  3828. }
  3829. off -= sdt_end;
  3830. while (count) {
  3831. if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
  3832. len = PAGE_ALIGN(off) - off;
  3833. else
  3834. len = count;
  3835. src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
  3836. src += off & ~PAGE_MASK;
  3837. memcpy(buf, src, len);
  3838. buf += len;
  3839. off += len;
  3840. count -= len;
  3841. }
  3842. kref_put(&dump->kref, ipr_release_dump);
  3843. return rc;
  3844. }
  3845. /**
  3846. * ipr_alloc_dump - Prepare for adapter dump
  3847. * @ioa_cfg: ioa config struct
  3848. *
  3849. * Return value:
  3850. * 0 on success / other on failure
  3851. **/
  3852. static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
  3853. {
  3854. struct ipr_dump *dump;
  3855. __be32 **ioa_data;
  3856. unsigned long lock_flags = 0;
  3857. dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
  3858. if (!dump) {
  3859. ipr_err("Dump memory allocation failed\n");
  3860. return -ENOMEM;
  3861. }
  3862. if (ioa_cfg->sis64)
  3863. ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
  3864. sizeof(__be32 *)));
  3865. else
  3866. ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
  3867. sizeof(__be32 *)));
  3868. if (!ioa_data) {
  3869. ipr_err("Dump memory allocation failed\n");
  3870. kfree(dump);
  3871. return -ENOMEM;
  3872. }
  3873. dump->ioa_dump.ioa_data = ioa_data;
  3874. kref_init(&dump->kref);
  3875. dump->ioa_cfg = ioa_cfg;
  3876. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3877. if (INACTIVE != ioa_cfg->sdt_state) {
  3878. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3879. vfree(dump->ioa_dump.ioa_data);
  3880. kfree(dump);
  3881. return 0;
  3882. }
  3883. ioa_cfg->dump = dump;
  3884. ioa_cfg->sdt_state = WAIT_FOR_DUMP;
  3885. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
  3886. ioa_cfg->dump_taken = 1;
  3887. schedule_work(&ioa_cfg->work_q);
  3888. }
  3889. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3890. return 0;
  3891. }
  3892. /**
  3893. * ipr_free_dump - Free adapter dump memory
  3894. * @ioa_cfg: ioa config struct
  3895. *
  3896. * Return value:
  3897. * 0 on success / other on failure
  3898. **/
  3899. static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
  3900. {
  3901. struct ipr_dump *dump;
  3902. unsigned long lock_flags = 0;
  3903. ENTER;
  3904. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3905. dump = ioa_cfg->dump;
  3906. if (!dump) {
  3907. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3908. return 0;
  3909. }
  3910. ioa_cfg->dump = NULL;
  3911. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3912. kref_put(&dump->kref, ipr_release_dump);
  3913. LEAVE;
  3914. return 0;
  3915. }
  3916. /**
  3917. * ipr_write_dump - Setup dump state of adapter
  3918. * @filp: open sysfs file
  3919. * @kobj: kobject struct
  3920. * @bin_attr: bin_attribute struct
  3921. * @buf: buffer
  3922. * @off: offset
  3923. * @count: buffer size
  3924. *
  3925. * Return value:
3926. * count on success / other on failure
  3927. **/
  3928. static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
  3929. struct bin_attribute *bin_attr,
  3930. char *buf, loff_t off, size_t count)
  3931. {
  3932. struct device *cdev = kobj_to_dev(kobj);
  3933. struct Scsi_Host *shost = class_to_shost(cdev);
  3934. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  3935. int rc;
  3936. if (!capable(CAP_SYS_ADMIN))
  3937. return -EACCES;
  3938. if (buf[0] == '1')
  3939. rc = ipr_alloc_dump(ioa_cfg);
  3940. else if (buf[0] == '0')
  3941. rc = ipr_free_dump(ioa_cfg);
  3942. else
  3943. return -EINVAL;
  3944. if (rc)
  3945. return rc;
  3946. else
  3947. return count;
  3948. }
  3949. static struct bin_attribute ipr_dump_attr = {
  3950. .attr = {
  3951. .name = "dump",
  3952. .mode = S_IRUSR | S_IWUSR,
  3953. },
  3954. .size = 0,
  3955. .read = ipr_read_dump,
  3956. .write = ipr_write_dump
  3957. };
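/*
 * Example: writing '1' to dump allocates dump memory and arms dump
 * collection (WAIT_FOR_DUMP); once a dump has been obtained it can be read
 * back, and writing '0' frees it. Assuming host 0:
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump
 *   cat /sys/class/scsi_host/host0/dump > ipr_dump.bin
 *   echo 0 > /sys/class/scsi_host/host0/dump
 */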
  3958. #else
  3959. static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
  3960. #endif
  3961. /**
  3962. * ipr_change_queue_depth - Change the device's queue depth
  3963. * @sdev: scsi device struct
  3964. * @qdepth: depth to set
  3965. *
  3966. * Return value:
  3967. * actual depth set
  3968. **/
  3969. static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
  3970. {
  3971. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
  3972. struct ipr_resource_entry *res;
  3973. unsigned long lock_flags = 0;
  3974. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3975. res = (struct ipr_resource_entry *)sdev->hostdata;
  3976. if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
  3977. qdepth = IPR_MAX_CMD_PER_ATA_LUN;
  3978. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  3979. scsi_change_queue_depth(sdev, qdepth);
  3980. return sdev->queue_depth;
  3981. }
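/*
 * Note: this is the host template's change_queue_depth hook, reached when
 * the midlayer changes a device's queue depth, e.g. when user space writes
 * the standard per-device sysfs attribute (typically
 * /sys/block/sdX/device/queue_depth); GATA devices are clamped to
 * IPR_MAX_CMD_PER_ATA_LUN.
 */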
  3982. /**
  3983. * ipr_show_adapter_handle - Show the adapter's resource handle for this device
  3984. * @dev: device struct
  3985. * @attr: device attribute structure
  3986. * @buf: buffer
  3987. *
  3988. * Return value:
  3989. * number of bytes printed to buffer
  3990. **/
  3991. static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
  3992. {
  3993. struct scsi_device *sdev = to_scsi_device(dev);
  3994. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
  3995. struct ipr_resource_entry *res;
  3996. unsigned long lock_flags = 0;
  3997. ssize_t len = -ENXIO;
  3998. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  3999. res = (struct ipr_resource_entry *)sdev->hostdata;
  4000. if (res)
  4001. len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
  4002. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4003. return len;
  4004. }
  4005. static struct device_attribute ipr_adapter_handle_attr = {
  4006. .attr = {
  4007. .name = "adapter_handle",
  4008. .mode = S_IRUSR,
  4009. },
  4010. .show = ipr_show_adapter_handle
  4011. };
  4012. /**
  4013. * ipr_show_resource_path - Show the resource path or the resource address for
  4014. * this device.
  4015. * @dev: device struct
  4016. * @attr: device attribute structure
  4017. * @buf: buffer
  4018. *
  4019. * Return value:
  4020. * number of bytes printed to buffer
  4021. **/
  4022. static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
  4023. {
  4024. struct scsi_device *sdev = to_scsi_device(dev);
  4025. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
  4026. struct ipr_resource_entry *res;
  4027. unsigned long lock_flags = 0;
  4028. ssize_t len = -ENXIO;
  4029. char buffer[IPR_MAX_RES_PATH_LENGTH];
  4030. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4031. res = (struct ipr_resource_entry *)sdev->hostdata;
  4032. if (res && ioa_cfg->sis64)
  4033. len = snprintf(buf, PAGE_SIZE, "%s\n",
  4034. __ipr_format_res_path(res->res_path, buffer,
  4035. sizeof(buffer)));
  4036. else if (res)
  4037. len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
  4038. res->bus, res->target, res->lun);
  4039. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4040. return len;
  4041. }
  4042. static struct device_attribute ipr_resource_path_attr = {
  4043. .attr = {
  4044. .name = "resource_path",
  4045. .mode = S_IRUGO,
  4046. },
  4047. .show = ipr_show_resource_path
  4048. };
  4049. /**
  4050. * ipr_show_device_id - Show the device_id for this device.
  4051. * @dev: device struct
  4052. * @attr: device attribute structure
  4053. * @buf: buffer
  4054. *
  4055. * Return value:
  4056. * number of bytes printed to buffer
  4057. **/
  4058. static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
  4059. {
  4060. struct scsi_device *sdev = to_scsi_device(dev);
  4061. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
  4062. struct ipr_resource_entry *res;
  4063. unsigned long lock_flags = 0;
  4064. ssize_t len = -ENXIO;
  4065. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4066. res = (struct ipr_resource_entry *)sdev->hostdata;
  4067. if (res && ioa_cfg->sis64)
  4068. len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
  4069. else if (res)
  4070. len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
  4071. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4072. return len;
  4073. }
  4074. static struct device_attribute ipr_device_id_attr = {
  4075. .attr = {
  4076. .name = "device_id",
  4077. .mode = S_IRUGO,
  4078. },
  4079. .show = ipr_show_device_id
  4080. };
  4081. /**
  4082. * ipr_show_resource_type - Show the resource type for this device.
  4083. * @dev: device struct
  4084. * @attr: device attribute structure
  4085. * @buf: buffer
  4086. *
  4087. * Return value:
  4088. * number of bytes printed to buffer
  4089. **/
  4090. static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
  4091. {
  4092. struct scsi_device *sdev = to_scsi_device(dev);
  4093. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
  4094. struct ipr_resource_entry *res;
  4095. unsigned long lock_flags = 0;
  4096. ssize_t len = -ENXIO;
  4097. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4098. res = (struct ipr_resource_entry *)sdev->hostdata;
  4099. if (res)
  4100. len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
  4101. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4102. return len;
  4103. }
  4104. static struct device_attribute ipr_resource_type_attr = {
  4105. .attr = {
  4106. .name = "resource_type",
  4107. .mode = S_IRUGO,
  4108. },
  4109. .show = ipr_show_resource_type
  4110. };
  4111. /**
  4112. * ipr_show_raw_mode - Show the adapter's raw mode
  4113. * @dev: class device struct
  4114. * @attr: device attribute (unused)
  4115. * @buf: buffer
  4116. *
  4117. * Return value:
  4118. * number of bytes printed to buffer
  4119. **/
  4120. static ssize_t ipr_show_raw_mode(struct device *dev,
  4121. struct device_attribute *attr, char *buf)
  4122. {
  4123. struct scsi_device *sdev = to_scsi_device(dev);
  4124. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
  4125. struct ipr_resource_entry *res;
  4126. unsigned long lock_flags = 0;
  4127. ssize_t len;
  4128. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4129. res = (struct ipr_resource_entry *)sdev->hostdata;
  4130. if (res)
  4131. len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
  4132. else
  4133. len = -ENXIO;
  4134. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4135. return len;
  4136. }
  4137. /**
  4138. * ipr_store_raw_mode - Change the adapter's raw mode
  4139. * @dev: class device struct
  4140. * @attr: device attribute (unused)
  4141. * @buf: buffer
  4142. * @count: buffer size
  4143. *
  4144. * Return value:
4145. * number of bytes consumed on success / error on failure
  4146. **/
  4147. static ssize_t ipr_store_raw_mode(struct device *dev,
  4148. struct device_attribute *attr,
  4149. const char *buf, size_t count)
  4150. {
  4151. struct scsi_device *sdev = to_scsi_device(dev);
  4152. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
  4153. struct ipr_resource_entry *res;
  4154. unsigned long lock_flags = 0;
  4155. ssize_t len;
  4156. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4157. res = (struct ipr_resource_entry *)sdev->hostdata;
  4158. if (res) {
  4159. if (ipr_is_af_dasd_device(res)) {
  4160. res->raw_mode = simple_strtoul(buf, NULL, 10);
  4161. len = strlen(buf);
  4162. if (res->sdev)
  4163. sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
  4164. res->raw_mode ? "enabled" : "disabled");
  4165. } else
  4166. len = -EINVAL;
  4167. } else
  4168. len = -ENXIO;
  4169. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4170. return len;
  4171. }
  4172. static struct device_attribute ipr_raw_mode_attr = {
  4173. .attr = {
  4174. .name = "raw_mode",
  4175. .mode = S_IRUGO | S_IWUSR,
  4176. },
  4177. .show = ipr_show_raw_mode,
  4178. .store = ipr_store_raw_mode
  4179. };
  4180. static struct attribute *ipr_dev_attrs[] = {
  4181. &ipr_adapter_handle_attr.attr,
  4182. &ipr_resource_path_attr.attr,
  4183. &ipr_device_id_attr.attr,
  4184. &ipr_resource_type_attr.attr,
  4185. &ipr_raw_mode_attr.attr,
  4186. NULL,
  4187. };
  4188. ATTRIBUTE_GROUPS(ipr_dev);
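/*
 * Example: these per-device attributes are surfaced on the scsi_device
 * sysfs node, typically /sys/class/scsi_device/<h:c:t:l>/device/; assuming
 * device 0:0:0:0:
 *
 *   cat /sys/class/scsi_device/0:0:0:0/device/resource_path
 *   echo 1 > /sys/class/scsi_device/0:0:0:0/device/raw_mode
 */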
  4189. /**
  4190. * ipr_biosparam - Return the HSC mapping
  4191. * @sdev: scsi device struct
  4192. * @block_device: block device pointer
  4193. * @capacity: capacity of the device
  4194. * @parm: Array containing returned HSC values.
  4195. *
  4196. * This function generates the HSC parms that fdisk uses.
  4197. * We want to make sure we return something that places partitions
  4198. * on 4k boundaries for best performance with the IOA.
  4199. *
  4200. * Return value:
  4201. * 0 on success
  4202. **/
  4203. static int ipr_biosparam(struct scsi_device *sdev,
  4204. struct block_device *block_device,
  4205. sector_t capacity, int *parm)
  4206. {
  4207. int heads, sectors;
  4208. sector_t cylinders;
  4209. heads = 128;
  4210. sectors = 32;
  4211. cylinders = capacity;
  4212. sector_div(cylinders, (128 * 32));
  4213. /* return result */
  4214. parm[0] = heads;
  4215. parm[1] = sectors;
  4216. parm[2] = cylinders;
  4217. return 0;
  4218. }
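/*
 * Worked example: with 128 heads and 32 sectors, one cylinder is 4096
 * blocks (2 MiB with 512-byte blocks), so cylinder-aligned partitions land
 * on 4 KiB boundaries; e.g. a 143,374,744-block disk reports
 * 143374744 / 4096 = 35003 cylinders.
 */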
  4219. /**
  4220. * ipr_find_starget - Find target based on bus/target.
  4221. * @starget: scsi target struct
  4222. *
  4223. * Return value:
  4224. * resource entry pointer if found / NULL if not found
  4225. **/
  4226. static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
  4227. {
  4228. struct Scsi_Host *shost = dev_to_shost(&starget->dev);
  4229. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
  4230. struct ipr_resource_entry *res;
  4231. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  4232. if ((res->bus == starget->channel) &&
  4233. (res->target == starget->id)) {
  4234. return res;
  4235. }
  4236. }
  4237. return NULL;
  4238. }
  4239. static struct ata_port_info sata_port_info;
  4240. /**
  4241. * ipr_target_alloc - Prepare for commands to a SCSI target
  4242. * @starget: scsi target struct
  4243. *
  4244. * If the device is a SATA device, this function allocates an
  4245. * ATA port with libata, else it does nothing.
  4246. *
  4247. * Return value:
  4248. * 0 on success / non-0 on failure
  4249. **/
  4250. static int ipr_target_alloc(struct scsi_target *starget)
  4251. {
  4252. struct Scsi_Host *shost = dev_to_shost(&starget->dev);
  4253. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
  4254. struct ipr_sata_port *sata_port;
  4255. struct ata_port *ap;
  4256. struct ipr_resource_entry *res;
  4257. unsigned long lock_flags;
  4258. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4259. res = ipr_find_starget(starget);
  4260. starget->hostdata = NULL;
  4261. if (res && ipr_is_gata(res)) {
  4262. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4263. sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
  4264. if (!sata_port)
  4265. return -ENOMEM;
  4266. ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
  4267. if (ap) {
  4268. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4269. sata_port->ioa_cfg = ioa_cfg;
  4270. sata_port->ap = ap;
  4271. sata_port->res = res;
  4272. res->sata_port = sata_port;
  4273. ap->private_data = sata_port;
  4274. starget->hostdata = sata_port;
  4275. } else {
  4276. kfree(sata_port);
  4277. return -ENOMEM;
  4278. }
  4279. }
  4280. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4281. return 0;
  4282. }
  4283. /**
  4284. * ipr_target_destroy - Destroy a SCSI target
  4285. * @starget: scsi target struct
  4286. *
  4287. * If the device was a SATA device, this function frees the libata
  4288. * ATA port, else it does nothing.
  4289. *
  4290. **/
  4291. static void ipr_target_destroy(struct scsi_target *starget)
  4292. {
  4293. struct ipr_sata_port *sata_port = starget->hostdata;
  4294. struct Scsi_Host *shost = dev_to_shost(&starget->dev);
  4295. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
  4296. if (ioa_cfg->sis64) {
  4297. if (!ipr_find_starget(starget)) {
  4298. if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
  4299. clear_bit(starget->id, ioa_cfg->array_ids);
  4300. else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
  4301. clear_bit(starget->id, ioa_cfg->vset_ids);
  4302. else if (starget->channel == 0)
  4303. clear_bit(starget->id, ioa_cfg->target_ids);
  4304. }
  4305. }
  4306. if (sata_port) {
  4307. starget->hostdata = NULL;
  4308. ata_sas_port_destroy(sata_port->ap);
  4309. kfree(sata_port);
  4310. }
  4311. }
  4312. /**
  4313. * ipr_find_sdev - Find device based on bus/target/lun.
  4314. * @sdev: scsi device struct
  4315. *
  4316. * Return value:
  4317. * resource entry pointer if found / NULL if not found
  4318. **/
  4319. static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
  4320. {
  4321. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
  4322. struct ipr_resource_entry *res;
  4323. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  4324. if ((res->bus == sdev->channel) &&
  4325. (res->target == sdev->id) &&
  4326. (res->lun == sdev->lun))
  4327. return res;
  4328. }
  4329. return NULL;
  4330. }
  4331. /**
  4332. * ipr_slave_destroy - Unconfigure a SCSI device
  4333. * @sdev: scsi device struct
  4334. *
  4335. * Return value:
  4336. * nothing
  4337. **/
  4338. static void ipr_slave_destroy(struct scsi_device *sdev)
  4339. {
  4340. struct ipr_resource_entry *res;
  4341. struct ipr_ioa_cfg *ioa_cfg;
  4342. unsigned long lock_flags = 0;
  4343. ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
  4344. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4345. res = (struct ipr_resource_entry *) sdev->hostdata;
  4346. if (res) {
  4347. if (res->sata_port)
  4348. res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
  4349. sdev->hostdata = NULL;
  4350. res->sdev = NULL;
  4351. res->sata_port = NULL;
  4352. }
  4353. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4354. }
  4355. /**
  4356. * ipr_slave_configure - Configure a SCSI device
  4357. * @sdev: scsi device struct
  4358. *
  4359. * This function configures the specified scsi device.
  4360. *
  4361. * Return value:
  4362. * 0 on success
  4363. **/
  4364. static int ipr_slave_configure(struct scsi_device *sdev)
  4365. {
  4366. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
  4367. struct ipr_resource_entry *res;
  4368. struct ata_port *ap = NULL;
  4369. unsigned long lock_flags = 0;
  4370. char buffer[IPR_MAX_RES_PATH_LENGTH];
  4371. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4372. res = sdev->hostdata;
  4373. if (res) {
  4374. if (ipr_is_af_dasd_device(res))
  4375. sdev->type = TYPE_RAID;
  4376. if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
  4377. sdev->scsi_level = 4;
  4378. sdev->no_uld_attach = 1;
  4379. }
  4380. if (ipr_is_vset_device(res)) {
  4381. sdev->scsi_level = SCSI_SPC_3;
  4382. sdev->no_report_opcodes = 1;
  4383. blk_queue_rq_timeout(sdev->request_queue,
  4384. IPR_VSET_RW_TIMEOUT);
  4385. blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
  4386. }
  4387. if (ipr_is_gata(res) && res->sata_port)
  4388. ap = res->sata_port->ap;
  4389. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4390. if (ap) {
  4391. scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
  4392. ata_sas_slave_configure(sdev, ap);
  4393. }
  4394. if (ioa_cfg->sis64)
  4395. sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
  4396. ipr_format_res_path(ioa_cfg,
  4397. res->res_path, buffer, sizeof(buffer)));
  4398. return 0;
  4399. }
  4400. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4401. return 0;
  4402. }
  4403. /**
  4404. * ipr_ata_slave_alloc - Prepare for commands to a SATA device
  4405. * @sdev: scsi device struct
  4406. *
  4407. * This function initializes an ATA port so that future commands
  4408. * sent through queuecommand will work.
  4409. *
  4410. * Return value:
  4411. * 0 on success
  4412. **/
  4413. static int ipr_ata_slave_alloc(struct scsi_device *sdev)
  4414. {
  4415. struct ipr_sata_port *sata_port = NULL;
  4416. int rc = -ENXIO;
  4417. ENTER;
  4418. if (sdev->sdev_target)
  4419. sata_port = sdev->sdev_target->hostdata;
  4420. if (sata_port) {
  4421. rc = ata_sas_port_init(sata_port->ap);
  4422. if (rc == 0)
  4423. rc = ata_sas_sync_probe(sata_port->ap);
  4424. }
  4425. if (rc)
  4426. ipr_slave_destroy(sdev);
  4427. LEAVE;
  4428. return rc;
  4429. }
  4430. /**
  4431. * ipr_slave_alloc - Prepare for commands to a device.
  4432. * @sdev: scsi device struct
  4433. *
  4434. * This function saves a pointer to the resource entry
  4435. * in the scsi device struct if the device exists. We
  4436. * can then use this pointer in ipr_queuecommand when
  4437. * handling new commands.
  4438. *
  4439. * Return value:
  4440. * 0 on success / -ENXIO if device does not exist
  4441. **/
  4442. static int ipr_slave_alloc(struct scsi_device *sdev)
  4443. {
  4444. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
  4445. struct ipr_resource_entry *res;
  4446. unsigned long lock_flags;
  4447. int rc = -ENXIO;
  4448. sdev->hostdata = NULL;
  4449. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4450. res = ipr_find_sdev(sdev);
  4451. if (res) {
  4452. res->sdev = sdev;
  4453. res->add_to_ml = 0;
  4454. res->in_erp = 0;
  4455. sdev->hostdata = res;
  4456. if (!ipr_is_naca_model(res))
  4457. res->needs_sync_complete = 1;
  4458. rc = 0;
  4459. if (ipr_is_gata(res)) {
  4460. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4461. return ipr_ata_slave_alloc(sdev);
  4462. }
  4463. }
  4464. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4465. return rc;
  4466. }
  4467. /**
  4468. * ipr_match_lun - Match function for specified LUN
  4469. * @ipr_cmd: ipr command struct
  4470. * @device: device to match (sdev)
  4471. *
  4472. * Returns:
  4473. * 1 if command matches sdev / 0 if command does not match sdev
  4474. **/
  4475. static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
  4476. {
  4477. if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
  4478. return 1;
  4479. return 0;
  4480. }
  4481. /**
  4482. * ipr_cmnd_is_free - Check if a command is free or not
  4483. * @ipr_cmd: ipr command struct
  4484. *
  4485. * Returns:
  4486. * true / false
  4487. **/
  4488. static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
  4489. {
  4490. struct ipr_cmnd *loop_cmd;
  4491. list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
  4492. if (loop_cmd == ipr_cmd)
  4493. return true;
  4494. }
  4495. return false;
  4496. }
  4497. /**
  4498. * ipr_match_res - Match function for specified resource entry
  4499. * @ipr_cmd: ipr command struct
  4500. * @resource: resource entry to match
  4501. *
  4502. * Returns:
4503. * 1 if command matches resource entry / 0 if it does not
  4504. **/
  4505. static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
  4506. {
  4507. struct ipr_resource_entry *res = resource;
  4508. if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
  4509. return 1;
  4510. return 0;
  4511. }
  4512. /**
  4513. * ipr_wait_for_ops - Wait for matching commands to complete
  4514. * @ioa_cfg: ioa config struct
  4515. * @device: device to match (sdev)
  4516. * @match: match function to use
  4517. *
  4518. * Returns:
  4519. * SUCCESS / FAILED
  4520. **/
  4521. static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
  4522. int (*match)(struct ipr_cmnd *, void *))
  4523. {
  4524. struct ipr_cmnd *ipr_cmd;
  4525. int wait, i;
  4526. unsigned long flags;
  4527. struct ipr_hrr_queue *hrrq;
  4528. signed long timeout = IPR_ABORT_TASK_TIMEOUT;
  4529. DECLARE_COMPLETION_ONSTACK(comp);
  4530. ENTER;
  4531. do {
  4532. wait = 0;
  4533. for_each_hrrq(hrrq, ioa_cfg) {
  4534. spin_lock_irqsave(hrrq->lock, flags);
  4535. for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
  4536. ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
  4537. if (!ipr_cmnd_is_free(ipr_cmd)) {
  4538. if (match(ipr_cmd, device)) {
  4539. ipr_cmd->eh_comp = &comp;
  4540. wait++;
  4541. }
  4542. }
  4543. }
  4544. spin_unlock_irqrestore(hrrq->lock, flags);
  4545. }
  4546. if (wait) {
  4547. timeout = wait_for_completion_timeout(&comp, timeout);
  4548. if (!timeout) {
  4549. wait = 0;
  4550. for_each_hrrq(hrrq, ioa_cfg) {
  4551. spin_lock_irqsave(hrrq->lock, flags);
  4552. for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
  4553. ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
  4554. if (!ipr_cmnd_is_free(ipr_cmd)) {
  4555. if (match(ipr_cmd, device)) {
  4556. ipr_cmd->eh_comp = NULL;
  4557. wait++;
  4558. }
  4559. }
  4560. }
  4561. spin_unlock_irqrestore(hrrq->lock, flags);
  4562. }
  4563. if (wait)
  4564. dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
  4565. LEAVE;
  4566. return wait ? FAILED : SUCCESS;
  4567. }
  4568. }
  4569. } while (wait);
  4570. LEAVE;
  4571. return SUCCESS;
  4572. }
  4573. static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
  4574. {
  4575. struct ipr_ioa_cfg *ioa_cfg;
  4576. unsigned long lock_flags = 0;
  4577. int rc = SUCCESS;
  4578. ENTER;
  4579. ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
  4580. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4581. if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
  4582. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
  4583. dev_err(&ioa_cfg->pdev->dev,
  4584. "Adapter being reset as a result of error recovery.\n");
  4585. if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
  4586. ioa_cfg->sdt_state = GET_DUMP;
  4587. }
  4588. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4589. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  4590. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4591. /* If we got hit with a host reset while we were already resetting
4592. the adapter for some reason and that reset failed, report FAILED. */
  4593. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
  4594. ipr_trace;
  4595. rc = FAILED;
  4596. }
  4597. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4598. LEAVE;
  4599. return rc;
  4600. }
  4601. /**
  4602. * ipr_device_reset - Reset the device
  4603. * @ioa_cfg: ioa config struct
  4604. * @res: resource entry struct
  4605. *
  4606. * This function issues a device reset to the affected device.
  4607. * If the device is a SCSI device, a LUN reset will be sent
  4608. * to the device first. If that does not work, a target reset
  4609. * will be sent. If the device is a SATA device, a PHY reset will
  4610. * be sent.
  4611. *
  4612. * Return value:
  4613. * 0 on success / non-zero on failure
  4614. **/
  4615. static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
  4616. struct ipr_resource_entry *res)
  4617. {
  4618. struct ipr_cmnd *ipr_cmd;
  4619. struct ipr_ioarcb *ioarcb;
  4620. struct ipr_cmd_pkt *cmd_pkt;
  4621. struct ipr_ioarcb_ata_regs *regs;
  4622. u32 ioasc;
  4623. ENTER;
  4624. ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
  4625. ioarcb = &ipr_cmd->ioarcb;
  4626. cmd_pkt = &ioarcb->cmd_pkt;
  4627. if (ipr_cmd->ioa_cfg->sis64) {
  4628. regs = &ipr_cmd->i.ata_ioadl.regs;
  4629. ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
  4630. } else
  4631. regs = &ioarcb->u.add_data.u.regs;
  4632. ioarcb->res_handle = res->res_handle;
  4633. cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
  4634. cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
  4635. if (ipr_is_gata(res)) {
  4636. cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
  4637. ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
  4638. regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
  4639. }
  4640. ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
  4641. ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  4642. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  4643. if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
  4644. if (ipr_cmd->ioa_cfg->sis64)
  4645. memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
  4646. sizeof(struct ipr_ioasa_gata));
  4647. else
  4648. memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
  4649. sizeof(struct ipr_ioasa_gata));
  4650. }
  4651. LEAVE;
  4652. return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
  4653. }
  4654. /**
  4655. * ipr_sata_reset - Reset the SATA port
  4656. * @link: SATA link to reset
  4657. * @classes: class of the attached device
  4658. * @deadline: unused
  4659. *
  4660. * This function issues a SATA phy reset to the affected ATA link.
  4661. *
  4662. * Return value:
  4663. * 0 on success / non-zero on failure
  4664. **/
  4665. static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
  4666. unsigned long deadline)
  4667. {
  4668. struct ipr_sata_port *sata_port = link->ap->private_data;
  4669. struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
  4670. struct ipr_resource_entry *res;
  4671. unsigned long lock_flags = 0;
  4672. int rc = -ENXIO, ret;
  4673. ENTER;
  4674. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4675. while (ioa_cfg->in_reset_reload) {
  4676. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4677. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  4678. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4679. }
  4680. res = sata_port->res;
  4681. if (res) {
  4682. rc = ipr_device_reset(ioa_cfg, res);
  4683. *classes = res->ata_class;
  4684. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4685. ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
  4686. if (ret != SUCCESS) {
  4687. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4688. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
  4689. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4690. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  4691. }
  4692. } else
  4693. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4694. LEAVE;
  4695. return rc;
  4696. }
  4697. /**
  4698. * __ipr_eh_dev_reset - Reset the device
  4699. * @scsi_cmd: scsi command struct
  4700. *
  4701. * This function issues a device reset to the affected device.
  4702. * A LUN reset will be sent to the device first. If that does
  4703. * not work, a target reset will be sent.
  4704. *
  4705. * Return value:
  4706. * SUCCESS / FAILED
  4707. **/
  4708. static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
  4709. {
  4710. struct ipr_cmnd *ipr_cmd;
  4711. struct ipr_ioa_cfg *ioa_cfg;
  4712. struct ipr_resource_entry *res;
  4713. struct ata_port *ap;
  4714. int rc = 0, i;
  4715. struct ipr_hrr_queue *hrrq;
  4716. ENTER;
  4717. ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
  4718. res = scsi_cmd->device->hostdata;
  4719. /*
  4720. * If we are currently going through reset/reload, return failed. This will force the
  4721. * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
  4722. * reset to complete
  4723. */
  4724. if (ioa_cfg->in_reset_reload)
  4725. return FAILED;
  4726. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
  4727. return FAILED;
  4728. for_each_hrrq(hrrq, ioa_cfg) {
  4729. spin_lock(&hrrq->_lock);
  4730. for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
  4731. ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
  4732. if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
  4733. if (!ipr_cmd->qc)
  4734. continue;
  4735. if (ipr_cmnd_is_free(ipr_cmd))
  4736. continue;
  4737. ipr_cmd->done = ipr_sata_eh_done;
  4738. if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
  4739. ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
  4740. ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
  4741. }
  4742. }
  4743. }
  4744. spin_unlock(&hrrq->_lock);
  4745. }
  4746. res->resetting_device = 1;
  4747. scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
  4748. if (ipr_is_gata(res) && res->sata_port) {
  4749. ap = res->sata_port->ap;
  4750. spin_unlock_irq(scsi_cmd->device->host->host_lock);
  4751. ata_std_error_handler(ap);
  4752. spin_lock_irq(scsi_cmd->device->host->host_lock);
  4753. } else
  4754. rc = ipr_device_reset(ioa_cfg, res);
  4755. res->resetting_device = 0;
  4756. res->reset_occurred = 1;
  4757. LEAVE;
  4758. return rc ? FAILED : SUCCESS;
  4759. }
  4760. static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
  4761. {
  4762. int rc;
  4763. struct ipr_ioa_cfg *ioa_cfg;
  4764. struct ipr_resource_entry *res;
  4765. ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
  4766. res = cmd->device->hostdata;
  4767. if (!res)
  4768. return FAILED;
  4769. spin_lock_irq(cmd->device->host->host_lock);
  4770. rc = __ipr_eh_dev_reset(cmd);
  4771. spin_unlock_irq(cmd->device->host->host_lock);
  4772. if (rc == SUCCESS) {
  4773. if (ipr_is_gata(res) && res->sata_port)
  4774. rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
  4775. else
  4776. rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
  4777. }
  4778. return rc;
  4779. }
  4780. /**
  4781. * ipr_bus_reset_done - Op done function for bus reset.
  4782. * @ipr_cmd: ipr command struct
  4783. *
  4784. * This function is the op done function for a bus reset
  4785. *
  4786. * Return value:
  4787. * none
  4788. **/
  4789. static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
  4790. {
  4791. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  4792. struct ipr_resource_entry *res;
  4793. ENTER;
  4794. if (!ioa_cfg->sis64)
  4795. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  4796. if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
  4797. scsi_report_bus_reset(ioa_cfg->host, res->bus);
  4798. break;
  4799. }
  4800. }
  4801. /*
  4802. * If abort has not completed, indicate the reset has, else call the
  4803. * abort's done function to wake the sleeping eh thread
  4804. */
  4805. if (ipr_cmd->sibling->sibling)
  4806. ipr_cmd->sibling->sibling = NULL;
  4807. else
  4808. ipr_cmd->sibling->done(ipr_cmd->sibling);
  4809. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  4810. LEAVE;
  4811. }
  4812. /**
  4813. * ipr_abort_timeout - An abort task has timed out
  4814. * @t: Timer context used to fetch ipr command struct
  4815. *
  4816. * This function handles when an abort task times out. If this
  4817. * happens we issue a bus reset since we have resources tied
  4818. * up that must be freed before returning to the midlayer.
  4819. *
  4820. * Return value:
  4821. * none
  4822. **/
  4823. static void ipr_abort_timeout(struct timer_list *t)
  4824. {
  4825. struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
  4826. struct ipr_cmnd *reset_cmd;
  4827. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  4828. struct ipr_cmd_pkt *cmd_pkt;
  4829. unsigned long lock_flags = 0;
  4830. ENTER;
  4831. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  4832. if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
  4833. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4834. return;
  4835. }
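/*
 * The abort never completed, so escalate to a bus reset. The abort and
 * the reset are linked through their sibling pointers so that
 * ipr_bus_reset_done() can wake the sleeping eh thread when the reset
 * finishes.
 */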
  4836. sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
  4837. reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
  4838. ipr_cmd->sibling = reset_cmd;
  4839. reset_cmd->sibling = ipr_cmd;
  4840. reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
  4841. cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
  4842. cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
  4843. cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
  4844. cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
  4845. ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
  4846. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  4847. LEAVE;
  4848. }
  4849. /**
  4850. * ipr_cancel_op - Cancel specified op
  4851. * @scsi_cmd: scsi command struct
  4852. *
  4853. * This function cancels specified op.
  4854. *
  4855. * Return value:
  4856. * SUCCESS / FAILED
  4857. **/
  4858. static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
  4859. {
  4860. struct ipr_cmnd *ipr_cmd;
  4861. struct ipr_ioa_cfg *ioa_cfg;
  4862. struct ipr_resource_entry *res;
  4863. struct ipr_cmd_pkt *cmd_pkt;
  4864. u32 ioasc;
  4865. int i, op_found = 0;
  4866. struct ipr_hrr_queue *hrrq;
  4867. ENTER;
  4868. ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
  4869. res = scsi_cmd->device->hostdata;
  4870. /* If we are currently going through reset/reload, return failed.
  4871. * This will force the mid-layer to call ipr_eh_host_reset,
  4872. * which will then go to sleep and wait for the reset to complete
  4873. */
  4874. if (ioa_cfg->in_reset_reload ||
  4875. ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
  4876. return FAILED;
  4877. if (!res)
  4878. return FAILED;
  4879. /*
4880. * If we are aborting a timed out op, chances are that the timeout was caused
4881. * by an EEH error that has not yet been detected. In that case, reading a
4882. * register will trigger the EEH recovery infrastructure.
  4883. */
  4884. readl(ioa_cfg->regs.sense_interrupt_reg);
  4885. if (!ipr_is_gscsi(res))
  4886. return FAILED;
  4887. for_each_hrrq(hrrq, ioa_cfg) {
  4888. spin_lock(&hrrq->_lock);
  4889. for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
  4890. if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
  4891. if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
  4892. op_found = 1;
  4893. break;
  4894. }
  4895. }
  4896. }
  4897. spin_unlock(&hrrq->_lock);
  4898. }
  4899. if (!op_found)
  4900. return SUCCESS;
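/*
 * The op is still outstanding on the adapter. Send a Cancel All
 * Requests IOA command for the device and block until it completes;
 * ipr_abort_timeout() escalates to a bus reset if the cancel times out.
 */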
  4901. ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
  4902. ipr_cmd->ioarcb.res_handle = res->res_handle;
  4903. cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
  4904. cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
  4905. cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
  4906. ipr_cmd->u.sdev = scsi_cmd->device;
  4907. scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
  4908. scsi_cmd->cmnd[0]);
  4909. ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
  4910. ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  4911. /*
  4912. * If the abort task timed out and we sent a bus reset, we will get
4913. * one of the following responses to the abort
  4914. */
  4915. if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
  4916. ioasc = 0;
  4917. ipr_trace;
  4918. }
  4919. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  4920. if (!ipr_is_naca_model(res))
  4921. res->needs_sync_complete = 1;
  4922. LEAVE;
  4923. return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
  4924. }
  4925. /**
  4926. * ipr_scan_finished - Report whether scan is done
  4927. * @shost: scsi host struct
  4928. * @elapsed_time: elapsed time
  4929. *
  4930. * Return value:
  4931. * 0 if scan in progress / 1 if scan is complete
  4932. **/
  4933. static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
  4934. {
  4935. unsigned long lock_flags;
  4936. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
  4937. int rc = 0;
  4938. spin_lock_irqsave(shost->host_lock, lock_flags);
  4939. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
  4940. rc = 1;
  4941. if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
  4942. rc = 1;
  4943. spin_unlock_irqrestore(shost->host_lock, lock_flags);
  4944. return rc;
  4945. }
  4946. /**
4947. * ipr_eh_abort - Abort a single op
  4948. * @scsi_cmd: scsi command struct
  4949. *
  4950. * Return value:
  4951. * SUCCESS / FAILED
  4952. **/
  4953. static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
  4954. {
  4955. unsigned long flags;
  4956. int rc;
  4957. struct ipr_ioa_cfg *ioa_cfg;
  4958. ENTER;
  4959. ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
  4960. spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
  4961. rc = ipr_cancel_op(scsi_cmd);
  4962. spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
  4963. if (rc == SUCCESS)
  4964. rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
  4965. LEAVE;
  4966. return rc;
  4967. }
  4968. /**
  4969. * ipr_handle_other_interrupt - Handle "other" interrupts
  4970. * @ioa_cfg: ioa config struct
  4971. * @int_reg: interrupt register
  4972. *
  4973. * Return value:
  4974. * IRQ_NONE / IRQ_HANDLED
  4975. **/
  4976. static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
  4977. u32 int_reg)
  4978. {
  4979. irqreturn_t rc = IRQ_HANDLED;
  4980. u32 int_mask_reg;
  4981. int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
  4982. int_reg &= ~int_mask_reg;
  4983. /* If an interrupt on the adapter did not occur, ignore it.
  4984. * Or in the case of SIS 64, check for a stage change interrupt.
  4985. */
  4986. if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
  4987. if (ioa_cfg->sis64) {
  4988. int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
  4989. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
  4990. if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
  4991. /* clear stage change */
  4992. writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
  4993. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
  4994. list_del(&ioa_cfg->reset_cmd->queue);
  4995. del_timer(&ioa_cfg->reset_cmd->timer);
  4996. ipr_reset_ioa_job(ioa_cfg->reset_cmd);
  4997. return IRQ_HANDLED;
  4998. }
  4999. }
  5000. return IRQ_NONE;
  5001. }
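/*
 * A transition to operational completes the in-progress reset job. A
 * lone HRRQ-updated bit (with clear_isr set) is treated as a spurious
 * interrupt. Anything else is a fatal adapter condition: capture a dump
 * if one is being waited for and reset the IOA.
 */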
  5002. if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
  5003. /* Mask the interrupt */
  5004. writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
  5005. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
  5006. list_del(&ioa_cfg->reset_cmd->queue);
  5007. del_timer(&ioa_cfg->reset_cmd->timer);
  5008. ipr_reset_ioa_job(ioa_cfg->reset_cmd);
  5009. } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
  5010. if (ioa_cfg->clear_isr) {
  5011. if (ipr_debug && printk_ratelimit())
  5012. dev_err(&ioa_cfg->pdev->dev,
  5013. "Spurious interrupt detected. 0x%08X\n", int_reg);
  5014. writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
  5015. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
  5016. return IRQ_NONE;
  5017. }
  5018. } else {
  5019. if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
  5020. ioa_cfg->ioa_unit_checked = 1;
  5021. else if (int_reg & IPR_PCII_NO_HOST_RRQ)
  5022. dev_err(&ioa_cfg->pdev->dev,
  5023. "No Host RRQ. 0x%08X\n", int_reg);
  5024. else
  5025. dev_err(&ioa_cfg->pdev->dev,
  5026. "Permanent IOA failure. 0x%08X\n", int_reg);
  5027. if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
  5028. ioa_cfg->sdt_state = GET_DUMP;
  5029. ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
  5030. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  5031. }
  5032. return rc;
  5033. }
  5034. /**
  5035. * ipr_isr_eh - Interrupt service routine error handler
  5036. * @ioa_cfg: ioa config struct
  5037. * @msg: message to log
  5038. * @number: various meanings depending on the caller/message
  5039. *
  5040. * Return value:
  5041. * none
  5042. **/
  5043. static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
  5044. {
  5045. ioa_cfg->errors_logged++;
  5046. dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
  5047. if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
  5048. ioa_cfg->sdt_state = GET_DUMP;
  5049. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  5050. }
  5051. static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
  5052. struct list_head *doneq)
  5053. {
  5054. u32 ioasc;
  5055. u16 cmd_index;
  5056. struct ipr_cmnd *ipr_cmd;
  5057. struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
  5058. int num_hrrq = 0;
  5059. /* If interrupts are disabled, ignore the interrupt */
  5060. if (!hrr_queue->allow_interrupts)
  5061. return 0;
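/*
 * Host RRQ entries are valid while their toggle bit matches the value
 * we expect; each time the queue wraps, the expected toggle bit is
 * flipped. Pull completed commands onto the caller's done queue until
 * the toggle bit no longer matches or the budget is exhausted.
 */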
  5062. while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
  5063. hrr_queue->toggle_bit) {
  5064. cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
  5065. IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
  5066. IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
  5067. if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
  5068. cmd_index < hrr_queue->min_cmd_id)) {
  5069. ipr_isr_eh(ioa_cfg,
  5070. "Invalid response handle from IOA: ",
  5071. cmd_index);
  5072. break;
  5073. }
  5074. ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
  5075. ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  5076. ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
  5077. list_move_tail(&ipr_cmd->queue, doneq);
  5078. if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
  5079. hrr_queue->hrrq_curr++;
  5080. } else {
  5081. hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
  5082. hrr_queue->toggle_bit ^= 1u;
  5083. }
  5084. num_hrrq++;
  5085. if (budget > 0 && num_hrrq >= budget)
  5086. break;
  5087. }
  5088. return num_hrrq;
  5089. }
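/*
 * ipr_iopoll - irq_poll callback. Processes up to @budget completions
 * from the HRR queue with the queue lock held, then invokes each
 * command's fast_done routine outside the lock.
 */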
  5090. static int ipr_iopoll(struct irq_poll *iop, int budget)
  5091. {
  5092. struct ipr_hrr_queue *hrrq;
  5093. struct ipr_cmnd *ipr_cmd, *temp;
  5094. unsigned long hrrq_flags;
  5095. int completed_ops;
  5096. LIST_HEAD(doneq);
  5097. hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
  5098. spin_lock_irqsave(hrrq->lock, hrrq_flags);
  5099. completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
  5100. if (completed_ops < budget)
  5101. irq_poll_complete(iop);
  5102. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5103. list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
  5104. list_del(&ipr_cmd->queue);
  5105. del_timer(&ipr_cmd->timer);
  5106. ipr_cmd->fast_done(ipr_cmd);
  5107. }
  5108. return completed_ops;
  5109. }
  5110. /**
  5111. * ipr_isr - Interrupt service routine
  5112. * @irq: irq number
5113. * @devp: pointer to the host RRQ (hrrq) struct
  5114. *
  5115. * Return value:
  5116. * IRQ_NONE / IRQ_HANDLED
  5117. **/
  5118. static irqreturn_t ipr_isr(int irq, void *devp)
  5119. {
  5120. struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
  5121. struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
  5122. unsigned long hrrq_flags = 0;
  5123. u32 int_reg = 0;
  5124. int num_hrrq = 0;
  5125. int irq_none = 0;
  5126. struct ipr_cmnd *ipr_cmd, *temp;
  5127. irqreturn_t rc = IRQ_NONE;
  5128. LIST_HEAD(doneq);
  5129. spin_lock_irqsave(hrrq->lock, hrrq_flags);
  5130. /* If interrupts are disabled, ignore the interrupt */
  5131. if (!hrrq->allow_interrupts) {
  5132. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5133. return IRQ_NONE;
  5134. }
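/*
 * Drain the HRR queue until it is empty. On adapters that require it
 * (clear_isr), the HRRQ-updated PCI interrupt must also be explicitly
 * cleared, retried up to IPR_MAX_HRRQ_RETRIES times before giving up
 * and resetting the adapter.
 */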
  5135. while (1) {
  5136. if (ipr_process_hrrq(hrrq, -1, &doneq)) {
  5137. rc = IRQ_HANDLED;
  5138. if (!ioa_cfg->clear_isr)
  5139. break;
  5140. /* Clear the PCI interrupt */
  5141. num_hrrq = 0;
  5142. do {
  5143. writel(IPR_PCII_HRRQ_UPDATED,
  5144. ioa_cfg->regs.clr_interrupt_reg32);
  5145. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
  5146. } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
  5147. num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
  5148. } else if (rc == IRQ_NONE && irq_none == 0) {
  5149. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
  5150. irq_none++;
  5151. } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
  5152. int_reg & IPR_PCII_HRRQ_UPDATED) {
  5153. ipr_isr_eh(ioa_cfg,
  5154. "Error clearing HRRQ: ", num_hrrq);
  5155. rc = IRQ_HANDLED;
  5156. break;
  5157. } else
  5158. break;
  5159. }
  5160. if (unlikely(rc == IRQ_NONE))
  5161. rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
  5162. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5163. list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
  5164. list_del(&ipr_cmd->queue);
  5165. del_timer(&ipr_cmd->timer);
  5166. ipr_cmd->fast_done(ipr_cmd);
  5167. }
  5168. return rc;
  5169. }
  5170. /**
  5171. * ipr_isr_mhrrq - Interrupt service routine
  5172. * @irq: irq number
5173. * @devp: pointer to the host RRQ (hrrq) struct
  5174. *
  5175. * Return value:
  5176. * IRQ_NONE / IRQ_HANDLED
  5177. **/
  5178. static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
  5179. {
  5180. struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
  5181. struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
  5182. unsigned long hrrq_flags = 0;
  5183. struct ipr_cmnd *ipr_cmd, *temp;
  5184. irqreturn_t rc = IRQ_NONE;
  5185. LIST_HEAD(doneq);
  5186. spin_lock_irqsave(hrrq->lock, hrrq_flags);
  5187. /* If interrupts are disabled, ignore the interrupt */
  5188. if (!hrrq->allow_interrupts) {
  5189. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5190. return IRQ_NONE;
  5191. }
  5192. if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
  5193. if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
  5194. hrrq->toggle_bit) {
  5195. irq_poll_sched(&hrrq->iopoll);
  5196. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5197. return IRQ_HANDLED;
  5198. }
  5199. } else {
  5200. if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
  5201. hrrq->toggle_bit)
  5202. if (ipr_process_hrrq(hrrq, -1, &doneq))
  5203. rc = IRQ_HANDLED;
  5204. }
  5205. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5206. list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
  5207. list_del(&ipr_cmd->queue);
  5208. del_timer(&ipr_cmd->timer);
  5209. ipr_cmd->fast_done(ipr_cmd);
  5210. }
  5211. return rc;
  5212. }
  5213. /**
  5214. * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
  5215. * @ioa_cfg: ioa config struct
  5216. * @ipr_cmd: ipr command struct
  5217. *
  5218. * Return value:
  5219. * 0 on success / -1 on failure
  5220. **/
  5221. static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
  5222. struct ipr_cmnd *ipr_cmd)
  5223. {
  5224. int i, nseg;
  5225. struct scatterlist *sg;
  5226. u32 length;
  5227. u32 ioadl_flags = 0;
  5228. struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
  5229. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  5230. struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
  5231. length = scsi_bufflen(scsi_cmd);
  5232. if (!length)
  5233. return 0;
  5234. nseg = scsi_dma_map(scsi_cmd);
  5235. if (nseg < 0) {
  5236. if (printk_ratelimit())
  5237. dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
  5238. return -1;
  5239. }
  5240. ipr_cmd->dma_use_sg = nseg;
  5241. ioarcb->data_transfer_length = cpu_to_be32(length);
  5242. ioarcb->ioadl_len =
  5243. cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
  5244. if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
  5245. ioadl_flags = IPR_IOADL_FLAGS_WRITE;
  5246. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  5247. } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
  5248. ioadl_flags = IPR_IOADL_FLAGS_READ;
  5249. scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
  5250. ioadl64[i].flags = cpu_to_be32(ioadl_flags);
  5251. ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
  5252. ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
  5253. }
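/* Flag the final descriptor so the IOA knows where the list ends. */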
  5254. ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
  5255. return 0;
  5256. }
  5257. /**
  5258. * ipr_build_ioadl - Build a scatter/gather list and map the buffer
  5259. * @ioa_cfg: ioa config struct
  5260. * @ipr_cmd: ipr command struct
  5261. *
  5262. * Return value:
  5263. * 0 on success / -1 on failure
  5264. **/
  5265. static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
  5266. struct ipr_cmnd *ipr_cmd)
  5267. {
  5268. int i, nseg;
  5269. struct scatterlist *sg;
  5270. u32 length;
  5271. u32 ioadl_flags = 0;
  5272. struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
  5273. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  5274. struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
  5275. length = scsi_bufflen(scsi_cmd);
  5276. if (!length)
  5277. return 0;
  5278. nseg = scsi_dma_map(scsi_cmd);
  5279. if (nseg < 0) {
  5280. dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
  5281. return -1;
  5282. }
  5283. ipr_cmd->dma_use_sg = nseg;
  5284. if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
  5285. ioadl_flags = IPR_IOADL_FLAGS_WRITE;
  5286. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  5287. ioarcb->data_transfer_length = cpu_to_be32(length);
  5288. ioarcb->ioadl_len =
  5289. cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
  5290. } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
  5291. ioadl_flags = IPR_IOADL_FLAGS_READ;
  5292. ioarcb->read_data_transfer_length = cpu_to_be32(length);
  5293. ioarcb->read_ioadl_len =
  5294. cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
  5295. }
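/*
 * If the S/G list is small enough, place the IOADL in the IOARCB itself
 * so the adapter does not need a separate DMA fetch to read it.
 */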
  5296. if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
  5297. ioadl = ioarcb->u.add_data.u.ioadl;
  5298. ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
  5299. offsetof(struct ipr_ioarcb, u.add_data));
  5300. ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
  5301. }
  5302. scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
  5303. ioadl[i].flags_and_data_len =
  5304. cpu_to_be32(ioadl_flags | sg_dma_len(sg));
  5305. ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
  5306. }
  5307. ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
  5308. return 0;
  5309. }
  5310. /**
  5311. * __ipr_erp_done - Process completion of ERP for a device
  5312. * @ipr_cmd: ipr command struct
  5313. *
  5314. * This function copies the sense buffer into the scsi_cmd
  5315. * struct and pushes the scsi_done function.
  5316. *
  5317. * Return value:
  5318. * nothing
  5319. **/
  5320. static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
  5321. {
  5322. struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
  5323. struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
  5324. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  5325. if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
  5326. scsi_cmd->result |= (DID_ERROR << 16);
  5327. scmd_printk(KERN_ERR, scsi_cmd,
  5328. "Request Sense failed with IOASC: 0x%08X\n", ioasc);
  5329. } else {
  5330. memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
  5331. SCSI_SENSE_BUFFERSIZE);
  5332. }
  5333. if (res) {
  5334. if (!ipr_is_naca_model(res))
  5335. res->needs_sync_complete = 1;
  5336. res->in_erp = 0;
  5337. }
  5338. scsi_dma_unmap(ipr_cmd->scsi_cmd);
  5339. scsi_done(scsi_cmd);
  5340. if (ipr_cmd->eh_comp)
  5341. complete(ipr_cmd->eh_comp);
  5342. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  5343. }
  5344. /**
  5345. * ipr_erp_done - Process completion of ERP for a device
  5346. * @ipr_cmd: ipr command struct
  5347. *
  5348. * This function copies the sense buffer into the scsi_cmd
  5349. * struct and pushes the scsi_done function.
  5350. *
  5351. * Return value:
  5352. * nothing
  5353. **/
  5354. static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
  5355. {
  5356. struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
  5357. unsigned long hrrq_flags;
  5358. spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
  5359. __ipr_erp_done(ipr_cmd);
  5360. spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
  5361. }
  5362. /**
  5363. * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
  5364. * @ipr_cmd: ipr command struct
  5365. *
  5366. * Return value:
  5367. * none
  5368. **/
  5369. static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
  5370. {
  5371. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  5372. struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
  5373. dma_addr_t dma_addr = ipr_cmd->dma_addr;
  5374. memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
  5375. ioarcb->data_transfer_length = 0;
  5376. ioarcb->read_data_transfer_length = 0;
  5377. ioarcb->ioadl_len = 0;
  5378. ioarcb->read_ioadl_len = 0;
  5379. ioasa->hdr.ioasc = 0;
  5380. ioasa->hdr.residual_data_len = 0;
  5381. if (ipr_cmd->ioa_cfg->sis64)
  5382. ioarcb->u.sis64_addr_data.data_ioadl_addr =
  5383. cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
  5384. else {
  5385. ioarcb->write_ioadl_addr =
  5386. cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
  5387. ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
  5388. }
  5389. }
  5390. /**
  5391. * __ipr_erp_request_sense - Send request sense to a device
  5392. * @ipr_cmd: ipr command struct
  5393. *
  5394. * This function sends a request sense to a device as a result
  5395. * of a check condition.
  5396. *
  5397. * Return value:
  5398. * nothing
  5399. **/
  5400. static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
  5401. {
  5402. struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
  5403. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  5404. if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
  5405. __ipr_erp_done(ipr_cmd);
  5406. return;
  5407. }
  5408. ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
  5409. cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
  5410. cmd_pkt->cdb[0] = REQUEST_SENSE;
  5411. cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
  5412. cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
  5413. cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
  5414. cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
  5415. ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
  5416. SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
  5417. ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
  5418. IPR_REQUEST_SENSE_TIMEOUT * 2);
  5419. }
  5420. /**
  5421. * ipr_erp_request_sense - Send request sense to a device
  5422. * @ipr_cmd: ipr command struct
  5423. *
  5424. * This function sends a request sense to a device as a result
  5425. * of a check condition.
  5426. *
  5427. * Return value:
  5428. * nothing
  5429. **/
  5430. static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
  5431. {
  5432. struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
  5433. unsigned long hrrq_flags;
  5434. spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
  5435. __ipr_erp_request_sense(ipr_cmd);
  5436. spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
  5437. }
  5438. /**
  5439. * ipr_erp_cancel_all - Send cancel all to a device
  5440. * @ipr_cmd: ipr command struct
  5441. *
  5442. * This function sends a cancel all to a device to clear the
  5443. * queue. If we are running TCQ on the device, QERR is set to 1,
  5444. * which means all outstanding ops have been dropped on the floor.
  5445. * Cancel all will return them to us.
  5446. *
  5447. * Return value:
  5448. * nothing
  5449. **/
  5450. static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
  5451. {
  5452. struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
  5453. struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
  5454. struct ipr_cmd_pkt *cmd_pkt;
  5455. res->in_erp = 1;
  5456. ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
  5457. if (!scsi_cmd->device->simple_tags) {
  5458. __ipr_erp_request_sense(ipr_cmd);
  5459. return;
  5460. }
  5461. cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
  5462. cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
  5463. cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
  5464. ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
  5465. IPR_CANCEL_ALL_TIMEOUT);
  5466. }
  5467. /**
  5468. * ipr_dump_ioasa - Dump contents of IOASA
  5469. * @ioa_cfg: ioa config struct
  5470. * @ipr_cmd: ipr command struct
  5471. * @res: resource entry struct
  5472. *
  5473. * This function is invoked by the interrupt handler when ops
  5474. * fail. It will log the IOASA if appropriate. Only called
  5475. * for GPDD ops.
  5476. *
  5477. * Return value:
  5478. * none
  5479. **/
  5480. static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
  5481. struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
  5482. {
  5483. int i;
  5484. u16 data_len;
  5485. u32 ioasc, fd_ioasc;
  5486. struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
  5487. __be32 *ioasa_data = (__be32 *)ioasa;
  5488. int error_index;
  5489. ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
  5490. fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
  5491. if (0 == ioasc)
  5492. return;
  5493. if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
  5494. return;
  5495. if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
  5496. error_index = ipr_get_error(fd_ioasc);
  5497. else
  5498. error_index = ipr_get_error(ioasc);
  5499. if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
  5500. /* Don't log an error if the IOA already logged one */
  5501. if (ioasa->hdr.ilid != 0)
  5502. return;
  5503. if (!ipr_is_gscsi(res))
  5504. return;
  5505. if (ipr_error_table[error_index].log_ioasa == 0)
  5506. return;
  5507. }
  5508. ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
  5509. data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
  5510. if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
  5511. data_len = sizeof(struct ipr_ioasa64);
  5512. else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
  5513. data_len = sizeof(struct ipr_ioasa);
  5514. ipr_err("IOASA Dump:\n");
  5515. for (i = 0; i < data_len / 4; i += 4) {
  5516. ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
  5517. be32_to_cpu(ioasa_data[i]),
  5518. be32_to_cpu(ioasa_data[i+1]),
  5519. be32_to_cpu(ioasa_data[i+2]),
  5520. be32_to_cpu(ioasa_data[i+3]));
  5521. }
  5522. }
  5523. /**
  5524. * ipr_gen_sense - Generate SCSI sense data from an IOASA
  5525. * @ipr_cmd: ipr command struct
  5526. *
  5527. * Return value:
  5528. * none
  5529. **/
  5530. static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
  5531. {
  5532. u32 failing_lba;
  5533. u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
  5534. struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
  5535. struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
  5536. u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
  5537. memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
  5538. if (ioasc >= IPR_FIRST_DRIVER_IOASC)
  5539. return;
  5540. ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
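/*
 * Media "do not reallocate" errors on a volume set with a failing LBA
 * above 32 bits need descriptor-format (0x72) sense data carrying the
 * full 64-bit LBA; everything else gets fixed-format (0x70) sense data.
 */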
  5541. if (ipr_is_vset_device(res) &&
  5542. ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
  5543. ioasa->u.vset.failing_lba_hi != 0) {
  5544. sense_buf[0] = 0x72;
  5545. sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
  5546. sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
  5547. sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
  5548. sense_buf[7] = 12;
  5549. sense_buf[8] = 0;
  5550. sense_buf[9] = 0x0A;
  5551. sense_buf[10] = 0x80;
  5552. failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
  5553. sense_buf[12] = (failing_lba & 0xff000000) >> 24;
  5554. sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
  5555. sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
  5556. sense_buf[15] = failing_lba & 0x000000ff;
  5557. failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
  5558. sense_buf[16] = (failing_lba & 0xff000000) >> 24;
  5559. sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
  5560. sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
  5561. sense_buf[19] = failing_lba & 0x000000ff;
  5562. } else {
  5563. sense_buf[0] = 0x70;
  5564. sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
  5565. sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
  5566. sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
  5567. /* Illegal request */
  5568. if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
  5569. (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
  5570. sense_buf[7] = 10; /* additional length */
  5571. /* IOARCB was in error */
  5572. if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
  5573. sense_buf[15] = 0xC0;
  5574. else /* Parameter data was invalid */
  5575. sense_buf[15] = 0x80;
  5576. sense_buf[16] =
  5577. ((IPR_FIELD_POINTER_MASK &
  5578. be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
  5579. sense_buf[17] =
  5580. (IPR_FIELD_POINTER_MASK &
  5581. be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
  5582. } else {
  5583. if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
  5584. if (ipr_is_vset_device(res))
  5585. failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
  5586. else
  5587. failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
  5588. sense_buf[0] |= 0x80; /* Or in the Valid bit */
  5589. sense_buf[3] = (failing_lba & 0xff000000) >> 24;
  5590. sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
  5591. sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
  5592. sense_buf[6] = failing_lba & 0x000000ff;
  5593. }
  5594. sense_buf[7] = 6; /* additional length */
  5595. }
  5596. }
  5597. }
  5598. /**
  5599. * ipr_get_autosense - Copy autosense data to sense buffer
  5600. * @ipr_cmd: ipr command struct
  5601. *
  5602. * This function copies the autosense buffer to the buffer
  5603. * in the scsi_cmd, if there is autosense available.
  5604. *
  5605. * Return value:
  5606. * 1 if autosense was available / 0 if not
  5607. **/
  5608. static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
  5609. {
  5610. struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
  5611. struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
  5612. if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
  5613. return 0;
  5614. if (ipr_cmd->ioa_cfg->sis64)
  5615. memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
  5616. min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
  5617. SCSI_SENSE_BUFFERSIZE));
  5618. else
  5619. memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
  5620. min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
  5621. SCSI_SENSE_BUFFERSIZE));
  5622. return 1;
  5623. }
  5624. /**
  5625. * ipr_erp_start - Process an error response for a SCSI op
  5626. * @ioa_cfg: ioa config struct
  5627. * @ipr_cmd: ipr command struct
  5628. *
  5629. * This function determines whether or not to initiate ERP
  5630. * on the affected device.
  5631. *
  5632. * Return value:
  5633. * nothing
  5634. **/
  5635. static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
  5636. struct ipr_cmnd *ipr_cmd)
  5637. {
  5638. struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
  5639. struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
  5640. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  5641. u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
  5642. if (!res) {
  5643. __ipr_scsi_eh_done(ipr_cmd);
  5644. return;
  5645. }
  5646. if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
  5647. ipr_gen_sense(ipr_cmd);
  5648. ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
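/*
 * Map the masked IOASC to a midlayer result. Some cases also kick off
 * further ERP (cancel all / request sense) instead of completing the
 * command here.
 */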
  5649. switch (masked_ioasc) {
  5650. case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
  5651. if (ipr_is_naca_model(res))
  5652. scsi_cmd->result |= (DID_ABORT << 16);
  5653. else
  5654. scsi_cmd->result |= (DID_IMM_RETRY << 16);
  5655. break;
  5656. case IPR_IOASC_IR_RESOURCE_HANDLE:
  5657. case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
  5658. scsi_cmd->result |= (DID_NO_CONNECT << 16);
  5659. break;
  5660. case IPR_IOASC_HW_SEL_TIMEOUT:
  5661. scsi_cmd->result |= (DID_NO_CONNECT << 16);
  5662. if (!ipr_is_naca_model(res))
  5663. res->needs_sync_complete = 1;
  5664. break;
  5665. case IPR_IOASC_SYNC_REQUIRED:
  5666. if (!res->in_erp)
  5667. res->needs_sync_complete = 1;
  5668. scsi_cmd->result |= (DID_IMM_RETRY << 16);
  5669. break;
  5670. case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
  5671. case IPR_IOASA_IR_DUAL_IOA_DISABLED:
  5672. /*
  5673. * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
  5674. * so SCSI mid-layer and upper layers handle it accordingly.
  5675. */
  5676. if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
  5677. scsi_cmd->result |= (DID_PASSTHROUGH << 16);
  5678. break;
  5679. case IPR_IOASC_BUS_WAS_RESET:
  5680. case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
  5681. /*
  5682. * Report the bus reset and ask for a retry. The device
  5683. * will give CC/UA the next command.
  5684. */
  5685. if (!res->resetting_device)
  5686. scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
  5687. scsi_cmd->result |= (DID_ERROR << 16);
  5688. if (!ipr_is_naca_model(res))
  5689. res->needs_sync_complete = 1;
  5690. break;
  5691. case IPR_IOASC_HW_DEV_BUS_STATUS:
  5692. scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
  5693. if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
  5694. if (!ipr_get_autosense(ipr_cmd)) {
  5695. if (!ipr_is_naca_model(res)) {
  5696. ipr_erp_cancel_all(ipr_cmd);
  5697. return;
  5698. }
  5699. }
  5700. }
  5701. if (!ipr_is_naca_model(res))
  5702. res->needs_sync_complete = 1;
  5703. break;
  5704. case IPR_IOASC_NR_INIT_CMD_REQUIRED:
  5705. break;
  5706. case IPR_IOASC_IR_NON_OPTIMIZED:
  5707. if (res->raw_mode) {
  5708. res->raw_mode = 0;
  5709. scsi_cmd->result |= (DID_IMM_RETRY << 16);
  5710. } else
  5711. scsi_cmd->result |= (DID_ERROR << 16);
  5712. break;
  5713. default:
  5714. if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
  5715. scsi_cmd->result |= (DID_ERROR << 16);
  5716. if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
  5717. res->needs_sync_complete = 1;
  5718. break;
  5719. }
  5720. scsi_dma_unmap(ipr_cmd->scsi_cmd);
  5721. scsi_done(scsi_cmd);
  5722. if (ipr_cmd->eh_comp)
  5723. complete(ipr_cmd->eh_comp);
  5724. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  5725. }
  5726. /**
  5727. * ipr_scsi_done - mid-layer done function
  5728. * @ipr_cmd: ipr command struct
  5729. *
  5730. * This function is invoked by the interrupt handler for
  5731. * ops generated by the SCSI mid-layer
  5732. *
  5733. * Return value:
  5734. * none
  5735. **/
  5736. static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
  5737. {
  5738. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  5739. struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
  5740. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  5741. unsigned long lock_flags;
  5742. scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
  5743. if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
  5744. scsi_dma_unmap(scsi_cmd);
  5745. spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
  5746. scsi_done(scsi_cmd);
  5747. if (ipr_cmd->eh_comp)
  5748. complete(ipr_cmd->eh_comp);
  5749. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  5750. spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
  5751. } else {
  5752. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  5753. spin_lock(&ipr_cmd->hrrq->_lock);
  5754. ipr_erp_start(ioa_cfg, ipr_cmd);
  5755. spin_unlock(&ipr_cmd->hrrq->_lock);
  5756. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  5757. }
  5758. }
  5759. /**
  5760. * ipr_queuecommand - Queue a mid-layer request
  5761. * @shost: scsi host struct
  5762. * @scsi_cmd: scsi command struct
  5763. *
  5764. * This function queues a request generated by the mid-layer.
  5765. *
  5766. * Return value:
  5767. * 0 on success
  5768. * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  5769. * SCSI_MLQUEUE_HOST_BUSY if host is busy
  5770. **/
  5771. static int ipr_queuecommand(struct Scsi_Host *shost,
  5772. struct scsi_cmnd *scsi_cmd)
  5773. {
  5774. struct ipr_ioa_cfg *ioa_cfg;
  5775. struct ipr_resource_entry *res;
  5776. struct ipr_ioarcb *ioarcb;
  5777. struct ipr_cmnd *ipr_cmd;
  5778. unsigned long hrrq_flags, lock_flags;
  5779. int rc;
  5780. struct ipr_hrr_queue *hrrq;
  5781. int hrrq_id;
  5782. ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
  5783. scsi_cmd->result = (DID_OK << 16);
  5784. res = scsi_cmd->device->hostdata;
  5785. if (ipr_is_gata(res) && res->sata_port) {
  5786. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  5787. rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
  5788. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  5789. return rc;
  5790. }
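/*
 * Pick an HRR queue for this command so that completions are spread
 * across the available interrupt vectors.
 */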
  5791. hrrq_id = ipr_get_hrrq_index(ioa_cfg);
  5792. hrrq = &ioa_cfg->hrrq[hrrq_id];
  5793. spin_lock_irqsave(hrrq->lock, hrrq_flags);
  5794. /*
  5795. * We are currently blocking all devices due to a host reset
  5796. * We have told the host to stop giving us new requests, but
  5797. * ERP ops don't count. FIXME
  5798. */
  5799. if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
  5800. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5801. return SCSI_MLQUEUE_HOST_BUSY;
  5802. }
  5803. /*
  5804. * FIXME - Create scsi_set_host_offline interface
  5805. * and the ioa_is_dead check can be removed
  5806. */
  5807. if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
  5808. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5809. goto err_nodev;
  5810. }
  5811. ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
  5812. if (ipr_cmd == NULL) {
  5813. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5814. return SCSI_MLQUEUE_HOST_BUSY;
  5815. }
  5816. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5817. ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
  5818. ioarcb = &ipr_cmd->ioarcb;
  5819. memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
  5820. ipr_cmd->scsi_cmd = scsi_cmd;
  5821. ipr_cmd->done = ipr_scsi_eh_done;
  5822. if (ipr_is_gscsi(res)) {
  5823. if (scsi_cmd->underflow == 0)
  5824. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
  5825. if (res->reset_occurred) {
  5826. res->reset_occurred = 0;
  5827. ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
  5828. }
  5829. }
  5830. if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
  5831. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
  5832. ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
  5833. if (scsi_cmd->flags & SCMD_TAGGED)
  5834. ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
  5835. else
  5836. ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
  5837. }
  5838. if (scsi_cmd->cmnd[0] >= 0xC0 &&
  5839. (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
  5840. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  5841. }
  5842. if (res->raw_mode && ipr_is_af_dasd_device(res)) {
  5843. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
  5844. if (scsi_cmd->underflow == 0)
  5845. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
  5846. }
  5847. if (ioa_cfg->sis64)
  5848. rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
  5849. else
  5850. rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
  5851. spin_lock_irqsave(hrrq->lock, hrrq_flags);
  5852. if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
  5853. list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
  5854. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5855. if (!rc)
  5856. scsi_dma_unmap(scsi_cmd);
  5857. return SCSI_MLQUEUE_HOST_BUSY;
  5858. }
  5859. if (unlikely(hrrq->ioa_is_dead)) {
  5860. list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
  5861. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5862. scsi_dma_unmap(scsi_cmd);
  5863. goto err_nodev;
  5864. }
  5865. ioarcb->res_handle = res->res_handle;
  5866. if (res->needs_sync_complete) {
  5867. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
  5868. res->needs_sync_complete = 0;
  5869. }
  5870. list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
  5871. ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
  5872. ipr_send_command(ipr_cmd);
  5873. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5874. return 0;
  5875. err_nodev:
  5876. spin_lock_irqsave(hrrq->lock, hrrq_flags);
  5877. memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  5878. scsi_cmd->result = (DID_NO_CONNECT << 16);
  5879. scsi_done(scsi_cmd);
  5880. spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
  5881. return 0;
  5882. }
  5883. /**
  5884. * ipr_ioctl - IOCTL handler
  5885. * @sdev: scsi device struct
  5886. * @cmd: IOCTL cmd
  5887. * @arg: IOCTL arg
  5888. *
  5889. * Return value:
  5890. * 0 on success / other on failure
  5891. **/
  5892. static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
  5893. void __user *arg)
  5894. {
  5895. struct ipr_resource_entry *res;
  5896. res = (struct ipr_resource_entry *)sdev->hostdata;
  5897. if (res && ipr_is_gata(res)) {
  5898. if (cmd == HDIO_GET_IDENTITY)
  5899. return -ENOTTY;
  5900. return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
  5901. }
  5902. return -EINVAL;
  5903. }
  5904. /**
  5905. * ipr_ioa_info - Get information about the card/driver
  5906. * @host: scsi host struct
  5907. *
  5908. * Return value:
  5909. * pointer to buffer with description string
  5910. **/
  5911. static const char *ipr_ioa_info(struct Scsi_Host *host)
  5912. {
  5913. static char buffer[512];
  5914. struct ipr_ioa_cfg *ioa_cfg;
  5915. unsigned long lock_flags = 0;
  5916. ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
  5917. spin_lock_irqsave(host->host_lock, lock_flags);
  5918. sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
  5919. spin_unlock_irqrestore(host->host_lock, lock_flags);
  5920. return buffer;
  5921. }
  5922. static struct scsi_host_template driver_template = {
  5923. .module = THIS_MODULE,
  5924. .name = "IPR",
  5925. .info = ipr_ioa_info,
  5926. .ioctl = ipr_ioctl,
  5927. #ifdef CONFIG_COMPAT
  5928. .compat_ioctl = ipr_ioctl,
  5929. #endif
  5930. .queuecommand = ipr_queuecommand,
  5931. .dma_need_drain = ata_scsi_dma_need_drain,
  5932. .eh_abort_handler = ipr_eh_abort,
  5933. .eh_device_reset_handler = ipr_eh_dev_reset,
  5934. .eh_host_reset_handler = ipr_eh_host_reset,
  5935. .slave_alloc = ipr_slave_alloc,
  5936. .slave_configure = ipr_slave_configure,
  5937. .slave_destroy = ipr_slave_destroy,
  5938. .scan_finished = ipr_scan_finished,
  5939. .target_alloc = ipr_target_alloc,
  5940. .target_destroy = ipr_target_destroy,
  5941. .change_queue_depth = ipr_change_queue_depth,
  5942. .bios_param = ipr_biosparam,
  5943. .can_queue = IPR_MAX_COMMANDS,
  5944. .this_id = -1,
  5945. .sg_tablesize = IPR_MAX_SGLIST,
  5946. .max_sectors = IPR_IOA_MAX_SECTORS,
  5947. .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
  5948. .shost_groups = ipr_ioa_groups,
  5949. .sdev_groups = ipr_dev_groups,
  5950. .proc_name = IPR_NAME,
  5951. };
  5952. /**
  5953. * ipr_ata_phy_reset - libata phy_reset handler
  5954. * @ap: ata port to reset
  5955. *
  5956. **/
  5957. static void ipr_ata_phy_reset(struct ata_port *ap)
  5958. {
  5959. unsigned long flags;
  5960. struct ipr_sata_port *sata_port = ap->private_data;
  5961. struct ipr_resource_entry *res = sata_port->res;
  5962. struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
  5963. int rc;
  5964. ENTER;
  5965. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  5966. while (ioa_cfg->in_reset_reload) {
  5967. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  5968. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  5969. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  5970. }
  5971. if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
  5972. goto out_unlock;
  5973. rc = ipr_device_reset(ioa_cfg, res);
  5974. if (rc) {
  5975. ap->link.device[0].class = ATA_DEV_NONE;
  5976. goto out_unlock;
  5977. }
  5978. ap->link.device[0].class = res->ata_class;
  5979. if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
  5980. ap->link.device[0].class = ATA_DEV_NONE;
  5981. out_unlock:
  5982. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  5983. LEAVE;
  5984. }
  5985. /**
  5986. * ipr_ata_post_internal - Cleanup after an internal command
  5987. * @qc: ATA queued command
  5988. *
  5989. * Return value:
  5990. * none
  5991. **/
  5992. static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
  5993. {
  5994. struct ipr_sata_port *sata_port = qc->ap->private_data;
  5995. struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
  5996. struct ipr_cmnd *ipr_cmd;
  5997. struct ipr_hrr_queue *hrrq;
  5998. unsigned long flags;
  5999. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  6000. while (ioa_cfg->in_reset_reload) {
  6001. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  6002. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  6003. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  6004. }
  6005. for_each_hrrq(hrrq, ioa_cfg) {
  6006. spin_lock(&hrrq->_lock);
  6007. list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
  6008. if (ipr_cmd->qc == qc) {
  6009. ipr_device_reset(ioa_cfg, sata_port->res);
  6010. break;
  6011. }
  6012. }
  6013. spin_unlock(&hrrq->_lock);
  6014. }
  6015. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  6016. }
  6017. /**
  6018. * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
  6019. * @regs: destination
  6020. * @tf: source ATA taskfile
  6021. *
  6022. * Return value:
  6023. * none
  6024. **/
  6025. static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
  6026. struct ata_taskfile *tf)
  6027. {
  6028. regs->feature = tf->feature;
  6029. regs->nsect = tf->nsect;
  6030. regs->lbal = tf->lbal;
  6031. regs->lbam = tf->lbam;
  6032. regs->lbah = tf->lbah;
  6033. regs->device = tf->device;
  6034. regs->command = tf->command;
  6035. regs->hob_feature = tf->hob_feature;
  6036. regs->hob_nsect = tf->hob_nsect;
  6037. regs->hob_lbal = tf->hob_lbal;
  6038. regs->hob_lbam = tf->hob_lbam;
  6039. regs->hob_lbah = tf->hob_lbah;
  6040. regs->ctl = tf->ctl;
  6041. }
  6042. /**
  6043. * ipr_sata_done - done function for SATA commands
  6044. * @ipr_cmd: ipr command struct
  6045. *
  6046. * This function is invoked by the interrupt handler for
  6047. * ops generated by the SCSI mid-layer to SATA devices
  6048. *
  6049. * Return value:
  6050. * none
  6051. **/
  6052. static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
  6053. {
  6054. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6055. struct ata_queued_cmd *qc = ipr_cmd->qc;
  6056. struct ipr_sata_port *sata_port = qc->ap->private_data;
  6057. struct ipr_resource_entry *res = sata_port->res;
  6058. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  6059. spin_lock(&ipr_cmd->hrrq->_lock);
  6060. if (ipr_cmd->ioa_cfg->sis64)
  6061. memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
  6062. sizeof(struct ipr_ioasa_gata));
  6063. else
  6064. memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
  6065. sizeof(struct ipr_ioasa_gata));
  6066. ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
  6067. if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
  6068. scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
  6069. if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
  6070. qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
  6071. else
  6072. qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
  6073. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  6074. spin_unlock(&ipr_cmd->hrrq->_lock);
  6075. ata_qc_complete(qc);
  6076. }
  6077. /**
  6078. * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
  6079. * @ipr_cmd: ipr command struct
  6080. * @qc: ATA queued command
  6081. *
  6082. **/
  6083. static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
  6084. struct ata_queued_cmd *qc)
  6085. {
  6086. u32 ioadl_flags = 0;
  6087. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6088. struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
  6089. struct ipr_ioadl64_desc *last_ioadl64 = NULL;
  6090. int len = qc->nbytes;
  6091. struct scatterlist *sg;
  6092. unsigned int si;
  6093. dma_addr_t dma_addr = ipr_cmd->dma_addr;
  6094. if (len == 0)
  6095. return;
  6096. if (qc->dma_dir == DMA_TO_DEVICE) {
  6097. ioadl_flags = IPR_IOADL_FLAGS_WRITE;
  6098. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  6099. } else if (qc->dma_dir == DMA_FROM_DEVICE)
  6100. ioadl_flags = IPR_IOADL_FLAGS_READ;
  6101. ioarcb->data_transfer_length = cpu_to_be32(len);
  6102. ioarcb->ioadl_len =
  6103. cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
  6104. ioarcb->u.sis64_addr_data.data_ioadl_addr =
  6105. cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
  6106. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  6107. ioadl64->flags = cpu_to_be32(ioadl_flags);
  6108. ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
  6109. ioadl64->address = cpu_to_be64(sg_dma_address(sg));
  6110. last_ioadl64 = ioadl64;
  6111. ioadl64++;
  6112. }
  6113. if (likely(last_ioadl64))
  6114. last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
  6115. }
  6116. /**
  6117. * ipr_build_ata_ioadl - Build an ATA scatter/gather list
  6118. * @ipr_cmd: ipr command struct
  6119. * @qc: ATA queued command
  6120. *
  6121. **/
  6122. static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
  6123. struct ata_queued_cmd *qc)
  6124. {
  6125. u32 ioadl_flags = 0;
  6126. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6127. struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
  6128. struct ipr_ioadl_desc *last_ioadl = NULL;
  6129. int len = qc->nbytes;
  6130. struct scatterlist *sg;
  6131. unsigned int si;
  6132. if (len == 0)
  6133. return;
  6134. if (qc->dma_dir == DMA_TO_DEVICE) {
  6135. ioadl_flags = IPR_IOADL_FLAGS_WRITE;
  6136. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  6137. ioarcb->data_transfer_length = cpu_to_be32(len);
  6138. ioarcb->ioadl_len =
  6139. cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
  6140. } else if (qc->dma_dir == DMA_FROM_DEVICE) {
  6141. ioadl_flags = IPR_IOADL_FLAGS_READ;
  6142. ioarcb->read_data_transfer_length = cpu_to_be32(len);
  6143. ioarcb->read_ioadl_len =
  6144. cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
  6145. }
  6146. for_each_sg(qc->sg, sg, qc->n_elem, si) {
  6147. ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
  6148. ioadl->address = cpu_to_be32(sg_dma_address(sg));
  6149. last_ioadl = ioadl;
  6150. ioadl++;
  6151. }
  6152. if (likely(last_ioadl))
  6153. last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
  6154. }
  6155. /**
  6156. * ipr_qc_defer - Get a free ipr_cmd
  6157. * @qc: queued command
  6158. *
  6159. * Return value:
  6160. * 0 if success
  6161. **/
  6162. static int ipr_qc_defer(struct ata_queued_cmd *qc)
  6163. {
  6164. struct ata_port *ap = qc->ap;
  6165. struct ipr_sata_port *sata_port = ap->private_data;
  6166. struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
  6167. struct ipr_cmnd *ipr_cmd;
  6168. struct ipr_hrr_queue *hrrq;
  6169. int hrrq_id;
  6170. hrrq_id = ipr_get_hrrq_index(ioa_cfg);
  6171. hrrq = &ioa_cfg->hrrq[hrrq_id];
  6172. qc->lldd_task = NULL;
  6173. spin_lock(&hrrq->_lock);
  6174. if (unlikely(hrrq->ioa_is_dead)) {
  6175. spin_unlock(&hrrq->_lock);
  6176. return 0;
  6177. }
  6178. if (unlikely(!hrrq->allow_cmds)) {
  6179. spin_unlock(&hrrq->_lock);
  6180. return ATA_DEFER_LINK;
  6181. }
  6182. ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
  6183. if (ipr_cmd == NULL) {
  6184. spin_unlock(&hrrq->_lock);
  6185. return ATA_DEFER_LINK;
  6186. }
  6187. qc->lldd_task = ipr_cmd;
  6188. spin_unlock(&hrrq->_lock);
  6189. return 0;
  6190. }
  6191. /**
  6192. * ipr_qc_issue - Issue a SATA qc to a device
  6193. * @qc: queued command
  6194. *
  6195. * Return value:
  6196. * 0 if success
  6197. **/
  6198. static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
  6199. {
  6200. struct ata_port *ap = qc->ap;
  6201. struct ipr_sata_port *sata_port = ap->private_data;
  6202. struct ipr_resource_entry *res = sata_port->res;
  6203. struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
  6204. struct ipr_cmnd *ipr_cmd;
  6205. struct ipr_ioarcb *ioarcb;
  6206. struct ipr_ioarcb_ata_regs *regs;
  6207. if (qc->lldd_task == NULL)
  6208. ipr_qc_defer(qc);
  6209. ipr_cmd = qc->lldd_task;
  6210. if (ipr_cmd == NULL)
  6211. return AC_ERR_SYSTEM;
  6212. qc->lldd_task = NULL;
  6213. spin_lock(&ipr_cmd->hrrq->_lock);
  6214. if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
  6215. ipr_cmd->hrrq->ioa_is_dead)) {
  6216. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  6217. spin_unlock(&ipr_cmd->hrrq->_lock);
  6218. return AC_ERR_SYSTEM;
  6219. }
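/*
 * The command was reserved for us by ipr_qc_defer(). Set it up for ATA
 * passthrough: copy the taskfile into the additional command
 * parameters, build the IOADL, and send it to the adapter.
 */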
  6220. ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
  6221. ioarcb = &ipr_cmd->ioarcb;
  6222. if (ioa_cfg->sis64) {
  6223. regs = &ipr_cmd->i.ata_ioadl.regs;
  6224. ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
  6225. } else
  6226. regs = &ioarcb->u.add_data.u.regs;
  6227. memset(regs, 0, sizeof(*regs));
  6228. ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
  6229. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
  6230. ipr_cmd->qc = qc;
  6231. ipr_cmd->done = ipr_sata_done;
  6232. ipr_cmd->ioarcb.res_handle = res->res_handle;
  6233. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
  6234. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
  6235. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
  6236. ipr_cmd->dma_use_sg = qc->n_elem;
  6237. if (ioa_cfg->sis64)
  6238. ipr_build_ata_ioadl64(ipr_cmd, qc);
  6239. else
  6240. ipr_build_ata_ioadl(ipr_cmd, qc);
  6241. regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
  6242. ipr_copy_sata_tf(regs, &qc->tf);
  6243. memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
  6244. ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
  6245. switch (qc->tf.protocol) {
  6246. case ATA_PROT_NODATA:
  6247. case ATA_PROT_PIO:
  6248. break;
  6249. case ATA_PROT_DMA:
  6250. regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
  6251. break;
  6252. case ATAPI_PROT_PIO:
  6253. case ATAPI_PROT_NODATA:
  6254. regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
  6255. break;
  6256. case ATAPI_PROT_DMA:
  6257. regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
  6258. regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
  6259. break;
  6260. default:
  6261. WARN_ON(1);
  6262. spin_unlock(&ipr_cmd->hrrq->_lock);
  6263. return AC_ERR_INVALID;
  6264. }
  6265. ipr_send_command(ipr_cmd);
  6266. spin_unlock(&ipr_cmd->hrrq->_lock);
  6267. return 0;
  6268. }
  6269. /**
  6270. * ipr_qc_fill_rtf - Read result TF
  6271. * @qc: ATA queued command
  6272. *
  6273. * Return value:
  6274. * true
  6275. **/
  6276. static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
  6277. {
  6278. struct ipr_sata_port *sata_port = qc->ap->private_data;
  6279. struct ipr_ioasa_gata *g = &sata_port->ioasa;
  6280. struct ata_taskfile *tf = &qc->result_tf;
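/* Copy the ATA shadow registers returned in the IOASA into the libata result taskfile. */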
  6281. tf->feature = g->error;
  6282. tf->nsect = g->nsect;
  6283. tf->lbal = g->lbal;
  6284. tf->lbam = g->lbam;
  6285. tf->lbah = g->lbah;
  6286. tf->device = g->device;
  6287. tf->command = g->status;
  6288. tf->hob_nsect = g->hob_nsect;
  6289. tf->hob_lbal = g->hob_lbal;
  6290. tf->hob_lbam = g->hob_lbam;
  6291. tf->hob_lbah = g->hob_lbah;
  6292. return true;
  6293. }
  6294. static struct ata_port_operations ipr_sata_ops = {
  6295. .phy_reset = ipr_ata_phy_reset,
  6296. .hardreset = ipr_sata_reset,
  6297. .post_internal_cmd = ipr_ata_post_internal,
  6298. .qc_prep = ata_noop_qc_prep,
  6299. .qc_defer = ipr_qc_defer,
  6300. .qc_issue = ipr_qc_issue,
  6301. .qc_fill_rtf = ipr_qc_fill_rtf,
  6302. .port_start = ata_sas_port_start,
  6303. .port_stop = ata_sas_port_stop
  6304. };
  6305. static struct ata_port_info sata_port_info = {
  6306. .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
  6307. ATA_FLAG_SAS_HOST,
  6308. .pio_mask = ATA_PIO4_ONLY,
  6309. .mwdma_mask = ATA_MWDMA2,
  6310. .udma_mask = ATA_UDMA6,
  6311. .port_ops = &ipr_sata_ops
  6312. };
  6313. #ifdef CONFIG_PPC_PSERIES
  6314. static const u16 ipr_blocked_processors[] = {
  6315. PVR_NORTHSTAR,
  6316. PVR_PULSAR,
  6317. PVR_POWER4,
  6318. PVR_ICESTAR,
  6319. PVR_SSTAR,
  6320. PVR_POWER4p,
  6321. PVR_630,
  6322. PVR_630p
  6323. };
  6324. /**
  6325. * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
  6326. * @ioa_cfg: ioa cfg struct
  6327. *
  6328. * Adapters that use Gemstone revision < 3.1 do not work reliably on
  6329. * certain pSeries hardware. This function determines if the given
6330. * adapter is in one of these configurations or not.
  6331. *
  6332. * Return value:
  6333. * 1 if adapter is not supported / 0 if adapter is supported
  6334. **/
  6335. static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
  6336. {
  6337. int i;
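/* 5702 (Gemstone) adapters below PCI revision 4 are not supported when running on one of the blocked processors listed above. */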
  6338. if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
  6339. for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
  6340. if (pvr_version_is(ipr_blocked_processors[i]))
  6341. return 1;
  6342. }
  6343. }
  6344. return 0;
  6345. }
  6346. #else
  6347. #define ipr_invalid_adapter(ioa_cfg) 0
  6348. #endif
  6349. /**
  6350. * ipr_ioa_bringdown_done - IOA bring down completion.
  6351. * @ipr_cmd: ipr command struct
  6352. *
  6353. * This function processes the completion of an adapter bring down.
  6354. * It wakes any reset sleepers.
  6355. *
  6356. * Return value:
  6357. * IPR_RC_JOB_RETURN
  6358. **/
  6359. static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
  6360. {
  6361. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6362. int i;
  6363. ENTER;
  6364. if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
  6365. ipr_trace;
  6366. ioa_cfg->scsi_unblock = 1;
  6367. schedule_work(&ioa_cfg->work_q);
  6368. }
  6369. ioa_cfg->in_reset_reload = 0;
  6370. ioa_cfg->reset_retries = 0;
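/* Mark every HRRQ dead so no further commands are accepted, then free the reset command block and wake any reset sleepers. */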
  6371. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  6372. spin_lock(&ioa_cfg->hrrq[i]._lock);
  6373. ioa_cfg->hrrq[i].ioa_is_dead = 1;
  6374. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  6375. }
  6376. wmb();
  6377. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  6378. wake_up_all(&ioa_cfg->reset_wait_q);
  6379. LEAVE;
  6380. return IPR_RC_JOB_RETURN;
  6381. }
  6382. /**
  6383. * ipr_ioa_reset_done - IOA reset completion.
  6384. * @ipr_cmd: ipr command struct
  6385. *
  6386. * This function processes the completion of an adapter reset.
  6387. * It schedules any necessary mid-layer add/removes and
  6388. * wakes any reset sleepers.
  6389. *
  6390. * Return value:
  6391. * IPR_RC_JOB_RETURN
  6392. **/
  6393. static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
  6394. {
  6395. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6396. struct ipr_resource_entry *res;
  6397. int j;
  6398. ENTER;
  6399. ioa_cfg->in_reset_reload = 0;
  6400. for (j = 0; j < ioa_cfg->hrrq_num; j++) {
  6401. spin_lock(&ioa_cfg->hrrq[j]._lock);
  6402. ioa_cfg->hrrq[j].allow_cmds = 1;
  6403. spin_unlock(&ioa_cfg->hrrq[j]._lock);
  6404. }
  6405. wmb();
  6406. ioa_cfg->reset_cmd = NULL;
  6407. ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
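/* If any resource still needs a mid-layer add or remove, the worker thread scheduled below will handle it. */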
  6408. list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
  6409. if (res->add_to_ml || res->del_from_ml) {
  6410. ipr_trace;
  6411. break;
  6412. }
  6413. }
  6414. schedule_work(&ioa_cfg->work_q);
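/* Re-arm the HCAM buffers: the first IPR_NUM_LOG_HCAMS are used for log data, the remainder for configuration change notifications. */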
  6415. for (j = 0; j < IPR_NUM_HCAMS; j++) {
  6416. list_del_init(&ioa_cfg->hostrcb[j]->queue);
  6417. if (j < IPR_NUM_LOG_HCAMS)
  6418. ipr_send_hcam(ioa_cfg,
  6419. IPR_HCAM_CDB_OP_CODE_LOG_DATA,
  6420. ioa_cfg->hostrcb[j]);
  6421. else
  6422. ipr_send_hcam(ioa_cfg,
  6423. IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
  6424. ioa_cfg->hostrcb[j]);
  6425. }
  6426. scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
  6427. dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
  6428. ioa_cfg->reset_retries = 0;
  6429. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  6430. wake_up_all(&ioa_cfg->reset_wait_q);
  6431. ioa_cfg->scsi_unblock = 1;
  6432. schedule_work(&ioa_cfg->work_q);
  6433. LEAVE;
  6434. return IPR_RC_JOB_RETURN;
  6435. }
  6436. /**
  6437. * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
  6438. * @supported_dev: supported device struct
  6439. * @vpids: vendor product id struct
  6440. *
  6441. * Return value:
  6442. * none
  6443. **/
  6444. static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
  6445. struct ipr_std_inq_vpids *vpids)
  6446. {
  6447. memset(supported_dev, 0, sizeof(struct ipr_supported_device));
  6448. memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
  6449. supported_dev->num_records = 1;
  6450. supported_dev->data_length =
  6451. cpu_to_be16(sizeof(struct ipr_supported_device));
  6452. supported_dev->reserved = 0;
  6453. }
  6454. /**
  6455. * ipr_set_supported_devs - Send Set Supported Devices for a device
  6456. * @ipr_cmd: ipr command struct
  6457. *
  6458. * This function sends a Set Supported Devices to the adapter
  6459. *
  6460. * Return value:
  6461. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  6462. **/
  6463. static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
  6464. {
  6465. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6466. struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
  6467. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6468. struct ipr_resource_entry *res = ipr_cmd->u.res;
  6469. ipr_cmd->job_step = ipr_ioa_reset_done;
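/* Issue one Set Supported Devices per invocation, resuming the walk of used_res_q where the previous call left off. */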
  6470. list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
  6471. if (!ipr_is_scsi_disk(res))
  6472. continue;
  6473. ipr_cmd->u.res = res;
  6474. ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
  6475. ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  6476. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  6477. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  6478. ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
  6479. ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
  6480. ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
  6481. ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
  6482. ipr_init_ioadl(ipr_cmd,
  6483. ioa_cfg->vpd_cbs_dma +
  6484. offsetof(struct ipr_misc_cbs, supp_dev),
  6485. sizeof(struct ipr_supported_device),
  6486. IPR_IOADL_FLAGS_WRITE_LAST);
  6487. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
  6488. IPR_SET_SUP_DEVICE_TIMEOUT);
  6489. if (!ioa_cfg->sis64)
  6490. ipr_cmd->job_step = ipr_set_supported_devs;
  6491. LEAVE;
  6492. return IPR_RC_JOB_RETURN;
  6493. }
  6494. LEAVE;
  6495. return IPR_RC_JOB_CONTINUE;
  6496. }
  6497. /**
  6498. * ipr_get_mode_page - Locate specified mode page
  6499. * @mode_pages: mode page buffer
  6500. * @page_code: page code to find
  6501. * @len: minimum required length for mode page
  6502. *
  6503. * Return value:
  6504. * pointer to mode page / NULL on failure
  6505. **/
  6506. static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
  6507. u32 page_code, u32 len)
  6508. {
  6509. struct ipr_mode_page_hdr *mode_hdr;
  6510. u32 page_length;
  6511. u32 length;
  6512. if (!mode_pages || (mode_pages->hdr.length == 0))
  6513. return NULL;
  6514. length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
  6515. mode_hdr = (struct ipr_mode_page_hdr *)
  6516. (mode_pages->data + mode_pages->hdr.block_desc_len);
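/* Walk the mode page headers that follow the block descriptors until the requested page code is found or the data runs out. */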
  6517. while (length) {
  6518. if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
  6519. if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
  6520. return mode_hdr;
  6521. break;
  6522. } else {
  6523. page_length = (sizeof(struct ipr_mode_page_hdr) +
  6524. mode_hdr->page_length);
  6525. length -= page_length;
  6526. mode_hdr = (struct ipr_mode_page_hdr *)
  6527. ((unsigned long)mode_hdr + page_length);
  6528. }
  6529. }
  6530. return NULL;
  6531. }
  6532. /**
  6533. * ipr_check_term_power - Check for term power errors
  6534. * @ioa_cfg: ioa config struct
  6535. * @mode_pages: IOAFP mode pages buffer
  6536. *
  6537. * Check the IOAFP's mode page 28 for term power errors
  6538. *
  6539. * Return value:
  6540. * nothing
  6541. **/
  6542. static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
  6543. struct ipr_mode_pages *mode_pages)
  6544. {
  6545. int i;
  6546. int entry_length;
  6547. struct ipr_dev_bus_entry *bus;
  6548. struct ipr_mode_page28 *mode_page;
  6549. mode_page = ipr_get_mode_page(mode_pages, 0x28,
  6550. sizeof(struct ipr_mode_page28));
  6551. entry_length = mode_page->entry_length;
  6552. bus = mode_page->bus;
  6553. for (i = 0; i < mode_page->num_entries; i++) {
  6554. if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
  6555. dev_err(&ioa_cfg->pdev->dev,
  6556. "Term power is absent on scsi bus %d\n",
  6557. bus->res_addr.bus);
  6558. }
  6559. bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
  6560. }
  6561. }
  6562. /**
  6563. * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
  6564. * @ioa_cfg: ioa config struct
  6565. *
6566. * Looks through the config table checking for SES devices. If an
6567. * SES device has an entry in the SES table specifying a maximum
6568. * SCSI bus speed, the bus speed is limited accordingly.
  6569. *
  6570. * Return value:
  6571. * none
  6572. **/
  6573. static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
  6574. {
  6575. u32 max_xfer_rate;
  6576. int i;
  6577. for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
  6578. max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
  6579. ioa_cfg->bus_attr[i].bus_width);
  6580. if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
  6581. ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
  6582. }
  6583. }
  6584. /**
  6585. * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
  6586. * @ioa_cfg: ioa config struct
  6587. * @mode_pages: mode page 28 buffer
  6588. *
  6589. * Updates mode page 28 based on driver configuration
  6590. *
  6591. * Return value:
  6592. * none
  6593. **/
  6594. static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
  6595. struct ipr_mode_pages *mode_pages)
  6596. {
  6597. int i, entry_length;
  6598. struct ipr_dev_bus_entry *bus;
  6599. struct ipr_bus_attributes *bus_attr;
  6600. struct ipr_mode_page28 *mode_page;
  6601. mode_page = ipr_get_mode_page(mode_pages, 0x28,
  6602. sizeof(struct ipr_mode_page28));
  6603. entry_length = mode_page->entry_length;
  6604. /* Loop for each device bus entry */
  6605. for (i = 0, bus = mode_page->bus;
  6606. i < mode_page->num_entries;
  6607. i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
  6608. if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
  6609. dev_err(&ioa_cfg->pdev->dev,
  6610. "Invalid resource address reported: 0x%08X\n",
  6611. IPR_GET_PHYS_LOC(bus->res_addr));
  6612. continue;
  6613. }
  6614. bus_attr = &ioa_cfg->bus_attr[i];
  6615. bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
  6616. bus->bus_width = bus_attr->bus_width;
  6617. bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
  6618. bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
  6619. if (bus_attr->qas_enabled)
  6620. bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
  6621. else
  6622. bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
  6623. }
  6624. }
  6625. /**
  6626. * ipr_build_mode_select - Build a mode select command
  6627. * @ipr_cmd: ipr command struct
  6628. * @res_handle: resource handle to send command to
  6629. * @parm: Byte 2 of Mode Sense command
  6630. * @dma_addr: DMA buffer address
  6631. * @xfer_len: data transfer length
  6632. *
  6633. * Return value:
  6634. * none
  6635. **/
  6636. static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
  6637. __be32 res_handle, u8 parm,
  6638. dma_addr_t dma_addr, u8 xfer_len)
  6639. {
  6640. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6641. ioarcb->res_handle = res_handle;
  6642. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
  6643. ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
  6644. ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
  6645. ioarcb->cmd_pkt.cdb[1] = parm;
  6646. ioarcb->cmd_pkt.cdb[4] = xfer_len;
  6647. ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
  6648. }
  6649. /**
  6650. * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
  6651. * @ipr_cmd: ipr command struct
  6652. *
  6653. * This function sets up the SCSI bus attributes and sends
  6654. * a Mode Select for Page 28 to activate them.
  6655. *
  6656. * Return value:
  6657. * IPR_RC_JOB_RETURN
  6658. **/
  6659. static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
  6660. {
  6661. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6662. struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
  6663. int length;
  6664. ENTER;
  6665. ipr_scsi_bus_speed_limit(ioa_cfg);
  6666. ipr_check_term_power(ioa_cfg, mode_pages);
  6667. ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
  6668. length = mode_pages->hdr.length + 1;
  6669. mode_pages->hdr.length = 0;
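/* The mode data length field is reserved on MODE SELECT, so it is cleared before the buffer is written back to the IOA. */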
  6670. ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
  6671. ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
  6672. length);
  6673. ipr_cmd->job_step = ipr_set_supported_devs;
  6674. ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
  6675. struct ipr_resource_entry, queue);
  6676. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
  6677. LEAVE;
  6678. return IPR_RC_JOB_RETURN;
  6679. }
  6680. /**
  6681. * ipr_build_mode_sense - Builds a mode sense command
  6682. * @ipr_cmd: ipr command struct
  6683. * @res_handle: resource entry struct
  6684. * @parm: Byte 2 of mode sense command
  6685. * @dma_addr: DMA address of mode sense buffer
  6686. * @xfer_len: Size of DMA buffer
  6687. *
  6688. * Return value:
  6689. * none
  6690. **/
  6691. static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
  6692. __be32 res_handle,
  6693. u8 parm, dma_addr_t dma_addr, u8 xfer_len)
  6694. {
  6695. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6696. ioarcb->res_handle = res_handle;
  6697. ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
  6698. ioarcb->cmd_pkt.cdb[2] = parm;
  6699. ioarcb->cmd_pkt.cdb[4] = xfer_len;
  6700. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
  6701. ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
  6702. }
  6703. /**
  6704. * ipr_reset_cmd_failed - Handle failure of IOA reset command
  6705. * @ipr_cmd: ipr command struct
  6706. *
  6707. * This function handles the failure of an IOA bringup command.
  6708. *
  6709. * Return value:
  6710. * IPR_RC_JOB_RETURN
  6711. **/
  6712. static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
  6713. {
  6714. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6715. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  6716. dev_err(&ioa_cfg->pdev->dev,
  6717. "0x%02X failed with IOASC: 0x%08X\n",
  6718. ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
  6719. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  6720. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  6721. return IPR_RC_JOB_RETURN;
  6722. }
  6723. /**
  6724. * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
  6725. * @ipr_cmd: ipr command struct
  6726. *
  6727. * This function handles the failure of a Mode Sense to the IOAFP.
  6728. * Some adapters do not handle all mode pages.
  6729. *
  6730. * Return value:
  6731. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  6732. **/
  6733. static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
  6734. {
  6735. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6736. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  6737. if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
  6738. ipr_cmd->job_step = ipr_set_supported_devs;
  6739. ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
  6740. struct ipr_resource_entry, queue);
  6741. return IPR_RC_JOB_CONTINUE;
  6742. }
  6743. return ipr_reset_cmd_failed(ipr_cmd);
  6744. }
  6745. /**
  6746. * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
  6747. * @ipr_cmd: ipr command struct
  6748. *
6749. * This function sends a Page 28 mode sense to the IOA to
  6750. * retrieve SCSI bus attributes.
  6751. *
  6752. * Return value:
  6753. * IPR_RC_JOB_RETURN
  6754. **/
  6755. static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
  6756. {
  6757. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6758. ENTER;
  6759. ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
  6760. 0x28, ioa_cfg->vpd_cbs_dma +
  6761. offsetof(struct ipr_misc_cbs, mode_pages),
  6762. sizeof(struct ipr_mode_pages));
  6763. ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
  6764. ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
  6765. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
  6766. LEAVE;
  6767. return IPR_RC_JOB_RETURN;
  6768. }
  6769. /**
  6770. * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
  6771. * @ipr_cmd: ipr command struct
  6772. *
  6773. * This function enables dual IOA RAID support if possible.
  6774. *
  6775. * Return value:
  6776. * IPR_RC_JOB_RETURN
  6777. **/
  6778. static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
  6779. {
  6780. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6781. struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
  6782. struct ipr_mode_page24 *mode_page;
  6783. int length;
  6784. ENTER;
  6785. mode_page = ipr_get_mode_page(mode_pages, 0x24,
  6786. sizeof(struct ipr_mode_page24));
  6787. if (mode_page)
  6788. mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
  6789. length = mode_pages->hdr.length + 1;
  6790. mode_pages->hdr.length = 0;
  6791. ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
  6792. ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
  6793. length);
  6794. ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
  6795. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
  6796. LEAVE;
  6797. return IPR_RC_JOB_RETURN;
  6798. }
  6799. /**
  6800. * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
  6801. * @ipr_cmd: ipr command struct
  6802. *
  6803. * This function handles the failure of a Mode Sense to the IOAFP.
  6804. * Some adapters do not handle all mode pages.
  6805. *
  6806. * Return value:
  6807. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  6808. **/
  6809. static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
  6810. {
  6811. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  6812. if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
  6813. ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
  6814. return IPR_RC_JOB_CONTINUE;
  6815. }
  6816. return ipr_reset_cmd_failed(ipr_cmd);
  6817. }
  6818. /**
  6819. * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
  6820. * @ipr_cmd: ipr command struct
  6821. *
6822. * This function sends a mode sense to the IOA to retrieve
  6823. * the IOA Advanced Function Control mode page.
  6824. *
  6825. * Return value:
  6826. * IPR_RC_JOB_RETURN
  6827. **/
  6828. static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
  6829. {
  6830. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6831. ENTER;
  6832. ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
  6833. 0x24, ioa_cfg->vpd_cbs_dma +
  6834. offsetof(struct ipr_misc_cbs, mode_pages),
  6835. sizeof(struct ipr_mode_pages));
  6836. ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
  6837. ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
  6838. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
  6839. LEAVE;
  6840. return IPR_RC_JOB_RETURN;
  6841. }
  6842. /**
  6843. * ipr_init_res_table - Initialize the resource table
  6844. * @ipr_cmd: ipr command struct
  6845. *
  6846. * This function looks through the existing resource table, comparing
  6847. * it with the config table. This function will take care of old/new
  6848. * devices and schedule adding/removing them from the mid-layer
  6849. * as appropriate.
  6850. *
  6851. * Return value:
  6852. * IPR_RC_JOB_CONTINUE
  6853. **/
  6854. static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
  6855. {
  6856. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6857. struct ipr_resource_entry *res, *temp;
  6858. struct ipr_config_table_entry_wrapper cfgtew;
  6859. int entries, found, flag, i;
  6860. LIST_HEAD(old_res);
  6861. ENTER;
  6862. if (ioa_cfg->sis64)
  6863. flag = ioa_cfg->u.cfg_table64->hdr64.flags;
  6864. else
  6865. flag = ioa_cfg->u.cfg_table->hdr.flags;
  6866. if (flag & IPR_UCODE_DOWNLOAD_REQ)
  6867. dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
  6868. list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
  6869. list_move_tail(&res->queue, &old_res);
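/* Move every known resource to a temporary list; entries still present in the new config table are moved back, anything left over is gone. */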
  6870. if (ioa_cfg->sis64)
  6871. entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
  6872. else
  6873. entries = ioa_cfg->u.cfg_table->hdr.num_entries;
  6874. for (i = 0; i < entries; i++) {
  6875. if (ioa_cfg->sis64)
  6876. cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
  6877. else
  6878. cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
  6879. found = 0;
  6880. list_for_each_entry_safe(res, temp, &old_res, queue) {
  6881. if (ipr_is_same_device(res, &cfgtew)) {
  6882. list_move_tail(&res->queue, &ioa_cfg->used_res_q);
  6883. found = 1;
  6884. break;
  6885. }
  6886. }
  6887. if (!found) {
  6888. if (list_empty(&ioa_cfg->free_res_q)) {
  6889. dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
  6890. break;
  6891. }
  6892. found = 1;
  6893. res = list_entry(ioa_cfg->free_res_q.next,
  6894. struct ipr_resource_entry, queue);
  6895. list_move_tail(&res->queue, &ioa_cfg->used_res_q);
  6896. ipr_init_res_entry(res, &cfgtew);
  6897. res->add_to_ml = 1;
  6898. } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
  6899. res->sdev->allow_restart = 1;
  6900. if (found)
  6901. ipr_update_res_entry(res, &cfgtew);
  6902. }
  6903. list_for_each_entry_safe(res, temp, &old_res, queue) {
  6904. if (res->sdev) {
  6905. res->del_from_ml = 1;
  6906. res->res_handle = IPR_INVALID_RES_HANDLE;
  6907. list_move_tail(&res->queue, &ioa_cfg->used_res_q);
  6908. }
  6909. }
  6910. list_for_each_entry_safe(res, temp, &old_res, queue) {
  6911. ipr_clear_res_target(res);
  6912. list_move_tail(&res->queue, &ioa_cfg->free_res_q);
  6913. }
  6914. if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
  6915. ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
  6916. else
  6917. ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
  6918. LEAVE;
  6919. return IPR_RC_JOB_CONTINUE;
  6920. }
  6921. /**
  6922. * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
  6923. * @ipr_cmd: ipr command struct
  6924. *
  6925. * This function sends a Query IOA Configuration command
  6926. * to the adapter to retrieve the IOA configuration table.
  6927. *
  6928. * Return value:
  6929. * IPR_RC_JOB_RETURN
  6930. **/
  6931. static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
  6932. {
  6933. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6934. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6935. struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
  6936. struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
  6937. ENTER;
  6938. if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
  6939. ioa_cfg->dual_raid = 1;
  6940. dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
  6941. ucode_vpd->major_release, ucode_vpd->card_type,
  6942. ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
  6943. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  6944. ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  6945. ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
  6946. ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
  6947. ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
  6948. ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
  6949. ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
  6950. IPR_IOADL_FLAGS_READ_LAST);
  6951. ipr_cmd->job_step = ipr_init_res_table;
  6952. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
  6953. LEAVE;
  6954. return IPR_RC_JOB_RETURN;
  6955. }
  6956. static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
  6957. {
  6958. u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  6959. if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
  6960. return IPR_RC_JOB_CONTINUE;
  6961. return ipr_reset_cmd_failed(ipr_cmd);
  6962. }
  6963. static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
  6964. __be32 res_handle, u8 sa_code)
  6965. {
  6966. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6967. ioarcb->res_handle = res_handle;
  6968. ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
  6969. ioarcb->cmd_pkt.cdb[1] = sa_code;
  6970. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  6971. }
  6972. /**
  6973. * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
  6974. * action
  6975. * @ipr_cmd: ipr command struct
  6976. *
  6977. * Return value:
6978. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  6979. **/
  6980. static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
  6981. {
  6982. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  6983. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  6984. struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
  6985. ENTER;
  6986. ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
  6987. if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
  6988. ipr_build_ioa_service_action(ipr_cmd,
  6989. cpu_to_be32(IPR_IOA_RES_HANDLE),
  6990. IPR_IOA_SA_CHANGE_CACHE_PARAMS);
  6991. ioarcb->cmd_pkt.cdb[2] = 0x40;
  6992. ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
  6993. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
  6994. IPR_SET_SUP_DEVICE_TIMEOUT);
  6995. LEAVE;
  6996. return IPR_RC_JOB_RETURN;
  6997. }
  6998. LEAVE;
  6999. return IPR_RC_JOB_CONTINUE;
  7000. }
  7001. /**
  7002. * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
  7003. * @ipr_cmd: ipr command struct
  7004. * @flags: flags to send
  7005. * @page: page to inquire
  7006. * @dma_addr: DMA address
  7007. * @xfer_len: transfer data length
  7008. *
  7009. * This utility function sends an inquiry to the adapter.
  7010. *
  7011. * Return value:
  7012. * none
  7013. **/
  7014. static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
  7015. dma_addr_t dma_addr, u8 xfer_len)
  7016. {
  7017. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  7018. ENTER;
  7019. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
  7020. ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  7021. ioarcb->cmd_pkt.cdb[0] = INQUIRY;
  7022. ioarcb->cmd_pkt.cdb[1] = flags;
  7023. ioarcb->cmd_pkt.cdb[2] = page;
  7024. ioarcb->cmd_pkt.cdb[4] = xfer_len;
  7025. ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
  7026. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
  7027. LEAVE;
  7028. }
  7029. /**
  7030. * ipr_inquiry_page_supported - Is the given inquiry page supported
  7031. * @page0: inquiry page 0 buffer
  7032. * @page: page code.
  7033. *
  7034. * This function determines if the specified inquiry page is supported.
  7035. *
  7036. * Return value:
  7037. * 1 if page is supported / 0 if not
  7038. **/
  7039. static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
  7040. {
  7041. int i;
  7042. for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
  7043. if (page0->page[i] == page)
  7044. return 1;
  7045. return 0;
  7046. }
  7047. /**
  7048. * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
  7049. * @ipr_cmd: ipr command struct
  7050. *
  7051. * This function sends a Page 0xC4 inquiry to the adapter
  7052. * to retrieve software VPD information.
  7053. *
  7054. * Return value:
  7055. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7056. **/
  7057. static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
  7058. {
  7059. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7060. struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
  7061. struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
  7062. ENTER;
  7063. ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
  7064. memset(pageC4, 0, sizeof(*pageC4));
  7065. if (ipr_inquiry_page_supported(page0, 0xC4)) {
  7066. ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
  7067. (ioa_cfg->vpd_cbs_dma
  7068. + offsetof(struct ipr_misc_cbs,
  7069. pageC4_data)),
  7070. sizeof(struct ipr_inquiry_pageC4));
  7071. return IPR_RC_JOB_RETURN;
  7072. }
  7073. LEAVE;
  7074. return IPR_RC_JOB_CONTINUE;
  7075. }
  7076. /**
  7077. * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
  7078. * @ipr_cmd: ipr command struct
  7079. *
  7080. * This function sends a Page 0xD0 inquiry to the adapter
  7081. * to retrieve adapter capabilities.
  7082. *
  7083. * Return value:
  7084. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7085. **/
  7086. static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
  7087. {
  7088. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7089. struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
  7090. struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
  7091. ENTER;
  7092. ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
  7093. memset(cap, 0, sizeof(*cap));
  7094. if (ipr_inquiry_page_supported(page0, 0xD0)) {
  7095. ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
  7096. ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
  7097. sizeof(struct ipr_inquiry_cap));
  7098. return IPR_RC_JOB_RETURN;
  7099. }
  7100. LEAVE;
  7101. return IPR_RC_JOB_CONTINUE;
  7102. }
  7103. /**
  7104. * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
  7105. * @ipr_cmd: ipr command struct
  7106. *
  7107. * This function sends a Page 3 inquiry to the adapter
  7108. * to retrieve software VPD information.
  7109. *
  7110. * Return value:
  7111. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7112. **/
  7113. static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
  7114. {
  7115. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7116. ENTER;
  7117. ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
  7118. ipr_ioafp_inquiry(ipr_cmd, 1, 3,
  7119. ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
  7120. sizeof(struct ipr_inquiry_page3));
  7121. LEAVE;
  7122. return IPR_RC_JOB_RETURN;
  7123. }
  7124. /**
  7125. * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
  7126. * @ipr_cmd: ipr command struct
  7127. *
  7128. * This function sends a Page 0 inquiry to the adapter
  7129. * to retrieve supported inquiry pages.
  7130. *
  7131. * Return value:
  7132. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7133. **/
  7134. static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
  7135. {
  7136. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7137. char type[5];
  7138. ENTER;
  7139. /* Grab the type out of the VPD and store it away */
  7140. memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
  7141. type[4] = '\0';
  7142. ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
  7143. if (ipr_invalid_adapter(ioa_cfg)) {
  7144. dev_err(&ioa_cfg->pdev->dev,
  7145. "Adapter not supported in this hardware configuration.\n");
  7146. if (!ipr_testmode) {
  7147. ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
  7148. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  7149. list_add_tail(&ipr_cmd->queue,
  7150. &ioa_cfg->hrrq->hrrq_free_q);
  7151. return IPR_RC_JOB_RETURN;
  7152. }
  7153. }
  7154. ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
  7155. ipr_ioafp_inquiry(ipr_cmd, 1, 0,
  7156. ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
  7157. sizeof(struct ipr_inquiry_page0));
  7158. LEAVE;
  7159. return IPR_RC_JOB_RETURN;
  7160. }
  7161. /**
  7162. * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
  7163. * @ipr_cmd: ipr command struct
  7164. *
  7165. * This function sends a standard inquiry to the adapter.
  7166. *
  7167. * Return value:
  7168. * IPR_RC_JOB_RETURN
  7169. **/
  7170. static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
  7171. {
  7172. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7173. ENTER;
  7174. ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
  7175. ipr_ioafp_inquiry(ipr_cmd, 0, 0,
  7176. ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
  7177. sizeof(struct ipr_ioa_vpd));
  7178. LEAVE;
  7179. return IPR_RC_JOB_RETURN;
  7180. }
  7181. /**
  7182. * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
  7183. * @ipr_cmd: ipr command struct
  7184. *
7185. * This function sends an Identify Host Request Response Queue
  7186. * command to establish the HRRQ with the adapter.
  7187. *
  7188. * Return value:
  7189. * IPR_RC_JOB_RETURN
  7190. **/
  7191. static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
  7192. {
  7193. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7194. struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
  7195. struct ipr_hrr_queue *hrrq;
  7196. ENTER;
  7197. ipr_cmd->job_step = ipr_ioafp_std_inquiry;
  7198. if (ioa_cfg->identify_hrrq_index == 0)
  7199. dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
  7200. if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
  7201. hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
  7202. ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
  7203. ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  7204. ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  7205. if (ioa_cfg->sis64)
  7206. ioarcb->cmd_pkt.cdb[1] = 0x1;
  7207. if (ioa_cfg->nvectors == 1)
  7208. ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
  7209. else
  7210. ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
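/* CDB bytes 2-5 carry the low 32 bits of the HRRQ DMA address (bytes 10-13 carry the high 32 bits on SIS-64) and bytes 7-8 carry the queue size in bytes. */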
  7211. ioarcb->cmd_pkt.cdb[2] =
  7212. ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
  7213. ioarcb->cmd_pkt.cdb[3] =
  7214. ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
  7215. ioarcb->cmd_pkt.cdb[4] =
  7216. ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
  7217. ioarcb->cmd_pkt.cdb[5] =
  7218. ((u64) hrrq->host_rrq_dma) & 0xff;
  7219. ioarcb->cmd_pkt.cdb[7] =
  7220. ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
  7221. ioarcb->cmd_pkt.cdb[8] =
  7222. (sizeof(u32) * hrrq->size) & 0xff;
  7223. if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
  7224. ioarcb->cmd_pkt.cdb[9] =
  7225. ioa_cfg->identify_hrrq_index;
  7226. if (ioa_cfg->sis64) {
  7227. ioarcb->cmd_pkt.cdb[10] =
  7228. ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
  7229. ioarcb->cmd_pkt.cdb[11] =
  7230. ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
  7231. ioarcb->cmd_pkt.cdb[12] =
  7232. ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
  7233. ioarcb->cmd_pkt.cdb[13] =
  7234. ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
  7235. }
  7236. if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
  7237. ioarcb->cmd_pkt.cdb[14] =
  7238. ioa_cfg->identify_hrrq_index;
  7239. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
  7240. IPR_INTERNAL_TIMEOUT);
  7241. if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
  7242. ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
  7243. LEAVE;
  7244. return IPR_RC_JOB_RETURN;
  7245. }
  7246. LEAVE;
  7247. return IPR_RC_JOB_CONTINUE;
  7248. }
  7249. /**
  7250. * ipr_reset_timer_done - Adapter reset timer function
  7251. * @t: Timer context used to fetch ipr command struct
  7252. *
  7253. * Description: This function is used in adapter reset processing
  7254. * for timing events. If the reset_cmd pointer in the IOA
  7255. * config struct is not this adapter's we are doing nested
  7256. * resets and fail_all_ops will take care of freeing the
  7257. * command block.
  7258. *
  7259. * Return value:
  7260. * none
  7261. **/
  7262. static void ipr_reset_timer_done(struct timer_list *t)
  7263. {
  7264. struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
  7265. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7266. unsigned long lock_flags = 0;
  7267. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  7268. if (ioa_cfg->reset_cmd == ipr_cmd) {
  7269. list_del(&ipr_cmd->queue);
  7270. ipr_cmd->done(ipr_cmd);
  7271. }
  7272. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  7273. }
  7274. /**
  7275. * ipr_reset_start_timer - Start a timer for adapter reset job
  7276. * @ipr_cmd: ipr command struct
  7277. * @timeout: timeout value
  7278. *
  7279. * Description: This function is used in adapter reset processing
  7280. * for timing events. If the reset_cmd pointer in the IOA
  7281. * config struct is not this adapter's we are doing nested
  7282. * resets and fail_all_ops will take care of freeing the
  7283. * command block.
  7284. *
  7285. * Return value:
  7286. * none
  7287. **/
  7288. static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
  7289. unsigned long timeout)
  7290. {
  7291. ENTER;
  7292. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
  7293. ipr_cmd->done = ipr_reset_ioa_job;
  7294. ipr_cmd->timer.expires = jiffies + timeout;
  7295. ipr_cmd->timer.function = ipr_reset_timer_done;
  7296. add_timer(&ipr_cmd->timer);
  7297. }
  7298. /**
  7299. * ipr_init_ioa_mem - Initialize ioa_cfg control block
  7300. * @ioa_cfg: ioa cfg struct
  7301. *
  7302. * Return value:
  7303. * nothing
  7304. **/
  7305. static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
  7306. {
  7307. struct ipr_hrr_queue *hrrq;
  7308. for_each_hrrq(hrrq, ioa_cfg) {
  7309. spin_lock(&hrrq->_lock);
  7310. memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
  7311. /* Initialize Host RRQ pointers */
  7312. hrrq->hrrq_start = hrrq->host_rrq;
  7313. hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
  7314. hrrq->hrrq_curr = hrrq->hrrq_start;
  7315. hrrq->toggle_bit = 1;
  7316. spin_unlock(&hrrq->_lock);
  7317. }
  7318. wmb();
  7319. ioa_cfg->identify_hrrq_index = 0;
  7320. if (ioa_cfg->hrrq_num == 1)
  7321. atomic_set(&ioa_cfg->hrrq_index, 0);
  7322. else
  7323. atomic_set(&ioa_cfg->hrrq_index, 1);
  7324. /* Zero out config table */
  7325. memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
  7326. }
  7327. /**
  7328. * ipr_reset_next_stage - Process IPL stage change based on feedback register.
  7329. * @ipr_cmd: ipr command struct
  7330. *
  7331. * Return value:
  7332. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7333. **/
  7334. static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
  7335. {
  7336. unsigned long stage, stage_time;
  7337. u32 feedback;
  7338. volatile u32 int_reg;
  7339. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7340. u64 maskval = 0;
  7341. feedback = readl(ioa_cfg->regs.init_feedback_reg);
  7342. stage = feedback & IPR_IPL_INIT_STAGE_MASK;
  7343. stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
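/* The init feedback register reports the current IPL stage and how long the adapter expects that stage to take. */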
  7344. ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
  7345. /* sanity check the stage_time value */
  7346. if (stage_time == 0)
  7347. stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
  7348. else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
  7349. stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
  7350. else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
  7351. stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
  7352. if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
  7353. writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
  7354. int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
  7355. stage_time = ioa_cfg->transop_timeout;
  7356. ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
  7357. } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
  7358. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
  7359. if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
  7360. ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
  7361. maskval = IPR_PCII_IPL_STAGE_CHANGE;
  7362. maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
  7363. writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
  7364. int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
  7365. return IPR_RC_JOB_CONTINUE;
  7366. }
  7367. }
  7368. ipr_cmd->timer.expires = jiffies + stage_time * HZ;
  7369. ipr_cmd->timer.function = ipr_oper_timeout;
  7370. ipr_cmd->done = ipr_reset_ioa_job;
  7371. add_timer(&ipr_cmd->timer);
  7372. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
  7373. return IPR_RC_JOB_RETURN;
  7374. }
  7375. /**
  7376. * ipr_reset_enable_ioa - Enable the IOA following a reset.
  7377. * @ipr_cmd: ipr command struct
  7378. *
  7379. * This function reinitializes some control blocks and
  7380. * enables destructive diagnostics on the adapter.
  7381. *
  7382. * Return value:
  7383. * IPR_RC_JOB_RETURN
  7384. **/
  7385. static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
  7386. {
  7387. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7388. volatile u32 int_reg;
  7389. volatile u64 maskval;
  7390. int i;
  7391. ENTER;
  7392. ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
  7393. ipr_init_ioa_mem(ioa_cfg);
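/* Allow interrupt handling on every HRRQ before unmasking adapter interrupts. */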
  7394. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  7395. spin_lock(&ioa_cfg->hrrq[i]._lock);
  7396. ioa_cfg->hrrq[i].allow_interrupts = 1;
  7397. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  7398. }
  7399. if (ioa_cfg->sis64) {
  7400. /* Set the adapter to the correct endian mode. */
  7401. writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
  7402. int_reg = readl(ioa_cfg->regs.endian_swap_reg);
  7403. }
  7404. int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
  7405. if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
  7406. writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
  7407. ioa_cfg->regs.clr_interrupt_mask_reg32);
  7408. int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
  7409. return IPR_RC_JOB_CONTINUE;
  7410. }
  7411. /* Enable destructive diagnostics on IOA */
  7412. writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
  7413. if (ioa_cfg->sis64) {
  7414. maskval = IPR_PCII_IPL_STAGE_CHANGE;
  7415. maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
  7416. writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
  7417. } else
  7418. writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
  7419. int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
  7420. dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
  7421. if (ioa_cfg->sis64) {
  7422. ipr_cmd->job_step = ipr_reset_next_stage;
  7423. return IPR_RC_JOB_CONTINUE;
  7424. }
  7425. ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
  7426. ipr_cmd->timer.function = ipr_oper_timeout;
  7427. ipr_cmd->done = ipr_reset_ioa_job;
  7428. add_timer(&ipr_cmd->timer);
  7429. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
  7430. LEAVE;
  7431. return IPR_RC_JOB_RETURN;
  7432. }
  7433. /**
  7434. * ipr_reset_wait_for_dump - Wait for a dump to timeout.
  7435. * @ipr_cmd: ipr command struct
  7436. *
  7437. * This function is invoked when an adapter dump has run out
  7438. * of processing time.
  7439. *
  7440. * Return value:
  7441. * IPR_RC_JOB_CONTINUE
  7442. **/
  7443. static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
  7444. {
  7445. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7446. if (ioa_cfg->sdt_state == GET_DUMP)
  7447. ioa_cfg->sdt_state = WAIT_FOR_DUMP;
  7448. else if (ioa_cfg->sdt_state == READ_DUMP)
  7449. ioa_cfg->sdt_state = ABORT_DUMP;
  7450. ioa_cfg->dump_timeout = 1;
  7451. ipr_cmd->job_step = ipr_reset_alert;
  7452. return IPR_RC_JOB_CONTINUE;
  7453. }
  7454. /**
  7455. * ipr_unit_check_no_data - Log a unit check/no data error log
  7456. * @ioa_cfg: ioa config struct
  7457. *
  7458. * Logs an error indicating the adapter unit checked, but for some
  7459. * reason, we were unable to fetch the unit check buffer.
  7460. *
  7461. * Return value:
  7462. * nothing
  7463. **/
  7464. static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
  7465. {
  7466. ioa_cfg->errors_logged++;
  7467. dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
  7468. }
  7469. /**
  7470. * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
  7471. * @ioa_cfg: ioa config struct
  7472. *
  7473. * Fetches the unit check buffer from the adapter by clocking the data
  7474. * through the mailbox register.
  7475. *
  7476. * Return value:
  7477. * nothing
  7478. **/
  7479. static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
  7480. {
  7481. unsigned long mailbox;
  7482. struct ipr_hostrcb *hostrcb;
  7483. struct ipr_uc_sdt sdt;
  7484. int rc, length;
  7485. u32 ioasc;
  7486. mailbox = readl(ioa_cfg->ioa_mailbox);
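/* The mailbox register holds the address of the SDT describing the unit check buffer. */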
  7487. if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
  7488. ipr_unit_check_no_data(ioa_cfg);
  7489. return;
  7490. }
  7491. memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
  7492. rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
  7493. (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
  7494. if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
  7495. ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
  7496. (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
  7497. ipr_unit_check_no_data(ioa_cfg);
  7498. return;
  7499. }
  7500. /* Find length of the first sdt entry (UC buffer) */
  7501. if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
  7502. length = be32_to_cpu(sdt.entry[0].end_token);
  7503. else
  7504. length = (be32_to_cpu(sdt.entry[0].end_token) -
  7505. be32_to_cpu(sdt.entry[0].start_token)) &
  7506. IPR_FMT2_MBX_ADDR_MASK;
  7507. hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
  7508. struct ipr_hostrcb, queue);
  7509. list_del_init(&hostrcb->queue);
  7510. memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
  7511. rc = ipr_get_ldump_data_section(ioa_cfg,
  7512. be32_to_cpu(sdt.entry[0].start_token),
  7513. (__be32 *)&hostrcb->hcam,
  7514. min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
  7515. if (!rc) {
  7516. ipr_handle_log_data(ioa_cfg, hostrcb);
  7517. ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
  7518. if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
  7519. ioa_cfg->sdt_state == GET_DUMP)
  7520. ioa_cfg->sdt_state = WAIT_FOR_DUMP;
  7521. } else
  7522. ipr_unit_check_no_data(ioa_cfg);
  7523. list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
  7524. }
  7525. /**
  7526. * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
  7527. * @ipr_cmd: ipr command struct
  7528. *
7529. * Description: This function retrieves the unit check buffer from the adapter.
  7530. *
  7531. * Return value:
  7532. * IPR_RC_JOB_RETURN
  7533. **/
  7534. static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
  7535. {
  7536. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7537. ENTER;
  7538. ioa_cfg->ioa_unit_checked = 0;
  7539. ipr_get_unit_check_buffer(ioa_cfg);
  7540. ipr_cmd->job_step = ipr_reset_alert;
  7541. ipr_reset_start_timer(ipr_cmd, 0);
  7542. LEAVE;
  7543. return IPR_RC_JOB_RETURN;
  7544. }
  7545. static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
  7546. {
  7547. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7548. ENTER;
  7549. if (ioa_cfg->sdt_state != GET_DUMP)
  7550. return IPR_RC_JOB_RETURN;
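/* On SIS-64, wait for the mailbox-stable bit (retrying until time runs out) before kicking off the dump read. */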
  7551. if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
  7552. (readl(ioa_cfg->regs.sense_interrupt_reg) &
  7553. IPR_PCII_MAILBOX_STABLE)) {
  7554. if (!ipr_cmd->u.time_left)
  7555. dev_err(&ioa_cfg->pdev->dev,
  7556. "Timed out waiting for Mailbox register.\n");
  7557. ioa_cfg->sdt_state = READ_DUMP;
  7558. ioa_cfg->dump_timeout = 0;
  7559. if (ioa_cfg->sis64)
  7560. ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
  7561. else
  7562. ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
  7563. ipr_cmd->job_step = ipr_reset_wait_for_dump;
  7564. schedule_work(&ioa_cfg->work_q);
  7565. } else {
  7566. ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
  7567. ipr_reset_start_timer(ipr_cmd,
  7568. IPR_CHECK_FOR_RESET_TIMEOUT);
  7569. }
  7570. LEAVE;
  7571. return IPR_RC_JOB_RETURN;
  7572. }
  7573. /**
  7574. * ipr_reset_restore_cfg_space - Restore PCI config space.
  7575. * @ipr_cmd: ipr command struct
  7576. *
  7577. * Description: This function restores the saved PCI config space of
  7578. * the adapter, fails all outstanding ops back to the callers, and
  7579. * fetches the dump/unit check if applicable to this reset.
  7580. *
  7581. * Return value:
  7582. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7583. **/
  7584. static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
  7585. {
  7586. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7587. ENTER;
  7588. ioa_cfg->pdev->state_saved = true;
  7589. pci_restore_state(ioa_cfg->pdev);
  7590. if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
  7591. ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
  7592. return IPR_RC_JOB_CONTINUE;
  7593. }
  7594. ipr_fail_all_ops(ioa_cfg);
  7595. if (ioa_cfg->sis64) {
  7596. /* Set the adapter to the correct endian mode. */
  7597. writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
  7598. readl(ioa_cfg->regs.endian_swap_reg);
  7599. }
  7600. if (ioa_cfg->ioa_unit_checked) {
  7601. if (ioa_cfg->sis64) {
  7602. ipr_cmd->job_step = ipr_reset_get_unit_check_job;
  7603. ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
  7604. return IPR_RC_JOB_RETURN;
  7605. } else {
  7606. ioa_cfg->ioa_unit_checked = 0;
  7607. ipr_get_unit_check_buffer(ioa_cfg);
  7608. ipr_cmd->job_step = ipr_reset_alert;
  7609. ipr_reset_start_timer(ipr_cmd, 0);
  7610. return IPR_RC_JOB_RETURN;
  7611. }
  7612. }
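/* Choose the next job step: finish a bringdown, wait for the dump mailbox, or re-enable the IOA. */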
  7613. if (ioa_cfg->in_ioa_bringdown) {
  7614. ipr_cmd->job_step = ipr_ioa_bringdown_done;
  7615. } else if (ioa_cfg->sdt_state == GET_DUMP) {
  7616. ipr_cmd->job_step = ipr_dump_mailbox_wait;
  7617. ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
  7618. } else {
  7619. ipr_cmd->job_step = ipr_reset_enable_ioa;
  7620. }
  7621. LEAVE;
  7622. return IPR_RC_JOB_CONTINUE;
  7623. }
  7624. /**
  7625. * ipr_reset_bist_done - BIST has completed on the adapter.
  7626. * @ipr_cmd: ipr command struct
  7627. *
  7628. * Description: Unblock config space and resume the reset process.
  7629. *
  7630. * Return value:
  7631. * IPR_RC_JOB_CONTINUE
  7632. **/
  7633. static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
  7634. {
  7635. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7636. ENTER;
  7637. if (ioa_cfg->cfg_locked)
  7638. pci_cfg_access_unlock(ioa_cfg->pdev);
  7639. ioa_cfg->cfg_locked = 0;
  7640. ipr_cmd->job_step = ipr_reset_restore_cfg_space;
  7641. LEAVE;
  7642. return IPR_RC_JOB_CONTINUE;
  7643. }
  7644. /**
  7645. * ipr_reset_start_bist - Run BIST on the adapter.
  7646. * @ipr_cmd: ipr command struct
  7647. *
  7648. * Description: This function runs BIST on the adapter, then delays 2 seconds.
  7649. *
  7650. * Return value:
  7651. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7652. **/
  7653. static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
  7654. {
  7655. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7656. int rc = PCIBIOS_SUCCESSFUL;
  7657. ENTER;
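/* SIS-64 chips start BIST through an MMIO uproc interrupt register; older chips use the PCI_BIST config register. */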
  7658. if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
  7659. writel(IPR_UPROCI_SIS64_START_BIST,
  7660. ioa_cfg->regs.set_uproc_interrupt_reg32);
  7661. else
  7662. rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
  7663. if (rc == PCIBIOS_SUCCESSFUL) {
  7664. ipr_cmd->job_step = ipr_reset_bist_done;
  7665. ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
  7666. rc = IPR_RC_JOB_RETURN;
  7667. } else {
  7668. if (ioa_cfg->cfg_locked)
  7669. pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
  7670. ioa_cfg->cfg_locked = 0;
  7671. ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
  7672. rc = IPR_RC_JOB_CONTINUE;
  7673. }
  7674. LEAVE;
  7675. return rc;
  7676. }
  7677. /**
  7678. * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
  7679. * @ipr_cmd: ipr command struct
  7680. *
  7681. * Description: This clears PCI reset to the adapter and delays two seconds.
  7682. *
  7683. * Return value:
  7684. * IPR_RC_JOB_RETURN
  7685. **/
  7686. static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
  7687. {
  7688. ENTER;
  7689. ipr_cmd->job_step = ipr_reset_bist_done;
  7690. ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
  7691. LEAVE;
  7692. return IPR_RC_JOB_RETURN;
  7693. }
  7694. /**
  7695. * ipr_reset_reset_work - Pulse a PCIe fundamental reset
  7696. * @work: work struct
  7697. *
  7698. * Description: This pulses warm reset to a slot.
  7699. *
  7700. **/
  7701. static void ipr_reset_reset_work(struct work_struct *work)
  7702. {
  7703. struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
  7704. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7705. struct pci_dev *pdev = ioa_cfg->pdev;
  7706. unsigned long lock_flags = 0;
  7707. ENTER;
  7708. pci_set_pcie_reset_state(pdev, pcie_warm_reset);
  7709. msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
  7710. pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
  7711. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  7712. if (ioa_cfg->reset_cmd == ipr_cmd)
  7713. ipr_reset_ioa_job(ipr_cmd);
  7714. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  7715. LEAVE;
  7716. }
  7717. /**
  7718. * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
  7719. * @ipr_cmd: ipr command struct
  7720. *
  7721. * Description: This asserts PCI reset to the adapter.
  7722. *
  7723. * Return value:
  7724. * IPR_RC_JOB_RETURN
  7725. **/
  7726. static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
  7727. {
  7728. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7729. ENTER;
  7730. INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
  7731. queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
  7732. ipr_cmd->job_step = ipr_reset_slot_reset_done;
  7733. LEAVE;
  7734. return IPR_RC_JOB_RETURN;
  7735. }
  7736. /**
  7737. * ipr_reset_block_config_access_wait - Wait for permission to block config access
  7738. * @ipr_cmd: ipr command struct
  7739. *
  7740. * Description: This attempts to block config access to the IOA.
  7741. *
  7742. * Return value:
  7743. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7744. **/
  7745. static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
  7746. {
  7747. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7748. int rc = IPR_RC_JOB_CONTINUE;
  7749. if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
  7750. ioa_cfg->cfg_locked = 1;
  7751. ipr_cmd->job_step = ioa_cfg->reset;
  7752. } else {
  7753. if (ipr_cmd->u.time_left) {
  7754. rc = IPR_RC_JOB_RETURN;
  7755. ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
  7756. ipr_reset_start_timer(ipr_cmd,
  7757. IPR_CHECK_FOR_RESET_TIMEOUT);
  7758. } else {
  7759. ipr_cmd->job_step = ioa_cfg->reset;
  7760. dev_err(&ioa_cfg->pdev->dev,
  7761. "Timed out waiting to lock config access. Resetting anyway.\n");
  7762. }
  7763. }
  7764. return rc;
  7765. }
  7766. /**
  7767. * ipr_reset_block_config_access - Block config access to the IOA
  7768. * @ipr_cmd: ipr command struct
  7769. *
  7770. * Description: This attempts to block config access to the IOA
  7771. *
  7772. * Return value:
  7773. * IPR_RC_JOB_CONTINUE
  7774. **/
  7775. static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
  7776. {
  7777. ipr_cmd->ioa_cfg->cfg_locked = 0;
  7778. ipr_cmd->job_step = ipr_reset_block_config_access_wait;
  7779. ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
  7780. return IPR_RC_JOB_CONTINUE;
  7781. }
  7782. /**
  7783. * ipr_reset_allowed - Query whether or not IOA can be reset
  7784. * @ioa_cfg: ioa config struct
  7785. *
  7786. * Return value:
  7787. * 0 if reset not allowed / non-zero if reset is allowed
  7788. **/
  7789. static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
  7790. {
  7791. volatile u32 temp_reg;
  7792. temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
  7793. return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
  7794. }
  7795. /**
  7796. * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
  7797. * @ipr_cmd: ipr command struct
  7798. *
  7799. * Description: This function waits for adapter permission to run BIST,
  7800. * then runs BIST. If the adapter does not give permission after a
  7801. * reasonable time, we will reset the adapter anyway. The impact of
  7802. * resetting the adapter without warning the adapter is the risk of
  7803. * losing the persistent error log on the adapter. If the adapter is
  7804. * reset while it is writing to the flash on the adapter, the flash
  7805. * segment will have bad ECC and be zeroed.
  7806. *
  7807. * Return value:
  7808. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7809. **/
  7810. static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
  7811. {
  7812. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7813. int rc = IPR_RC_JOB_RETURN;
  7814. if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
  7815. ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
  7816. ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
  7817. } else {
  7818. ipr_cmd->job_step = ipr_reset_block_config_access;
  7819. rc = IPR_RC_JOB_CONTINUE;
  7820. }
  7821. return rc;
  7822. }
  7823. /**
  7824. * ipr_reset_alert - Alert the adapter of a pending reset
  7825. * @ipr_cmd: ipr command struct
  7826. *
  7827. * Description: This function alerts the adapter that it will be reset.
  7828. * If memory space is not currently enabled, proceed directly
  7829. * to running BIST on the adapter. The timer must always be started
  7830. * so we guarantee we do not run BIST from ipr_isr.
  7831. *
  7832. * Return value:
  7833. * IPR_RC_JOB_RETURN
  7834. **/
  7835. static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
  7836. {
  7837. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7838. u16 cmd_reg;
  7839. int rc;
  7840. ENTER;
  7841. rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
  7842. if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
  7843. ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
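/* Warn adapter firmware of the pending reset so it can quiesce (for example, finish any flash writes) before BIST is started. */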
  7844. writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
  7845. ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
  7846. } else {
  7847. ipr_cmd->job_step = ipr_reset_block_config_access;
  7848. }
  7849. ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
  7850. ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
  7851. LEAVE;
  7852. return IPR_RC_JOB_RETURN;
  7853. }
  7854. /**
  7855. * ipr_reset_quiesce_done - Complete IOA disconnect
  7856. * @ipr_cmd: ipr command struct
  7857. *
  7858. * Description: Freeze the adapter to complete quiesce processing
  7859. *
  7860. * Return value:
  7861. * IPR_RC_JOB_CONTINUE
  7862. **/
  7863. static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
  7864. {
  7865. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7866. ENTER;
  7867. ipr_cmd->job_step = ipr_ioa_bringdown_done;
  7868. ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
  7869. LEAVE;
  7870. return IPR_RC_JOB_CONTINUE;
  7871. }
  7872. /**
  7873. * ipr_reset_cancel_hcam_done - Check for outstanding commands
  7874. * @ipr_cmd: ipr command struct
  7875. *
  7876. * Description: Ensure nothing is outstanding to the IOA and
  7877. * proceed with IOA disconnect. Otherwise reset the IOA.
  7878. *
  7879. * Return value:
  7880. * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
  7881. **/
  7882. static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
  7883. {
  7884. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7885. struct ipr_cmnd *loop_cmd;
  7886. struct ipr_hrr_queue *hrrq;
  7887. int rc = IPR_RC_JOB_CONTINUE;
  7888. int count = 0;
  7889. ENTER;
  7890. ipr_cmd->job_step = ipr_reset_quiesce_done;
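/* A quiesce is only safe if nothing is outstanding to the IOA; if any command is still pending on an HRRQ, fall back to a full IOA reset. */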
  7891. for_each_hrrq(hrrq, ioa_cfg) {
  7892. spin_lock(&hrrq->_lock);
  7893. list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
  7894. count++;
  7895. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  7896. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
  7897. rc = IPR_RC_JOB_RETURN;
  7898. break;
  7899. }
  7900. spin_unlock(&hrrq->_lock);
  7901. if (count)
  7902. break;
  7903. }
  7904. LEAVE;
  7905. return rc;
  7906. }
  7907. /**
  7908. * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
  7909. * @ipr_cmd: ipr command struct
  7910. *
7911. * Description: Cancel any outstanding HCAMs to the IOA.
  7912. *
  7913. * Return value:
  7914. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7915. **/
  7916. static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
  7917. {
  7918. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7919. int rc = IPR_RC_JOB_CONTINUE;
  7920. struct ipr_cmd_pkt *cmd_pkt;
  7921. struct ipr_cmnd *hcam_cmd;
  7922. struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
  7923. ENTER;
  7924. ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
  7925. if (!hrrq->ioa_is_dead) {
  7926. if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
  7927. list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
  7928. if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
  7929. continue;
  7930. ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  7931. ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  7932. cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
  7933. cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
  7934. cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
  7935. cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
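/* Pack the 64-bit DMA address of the HCAM being cancelled into the CDB: cdb[10..13] carry the upper 32 bits and cdb[2..5] the lower 32 bits, most significant byte first. */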
  7936. cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
  7937. cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
  7938. cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
  7939. cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
  7940. cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
  7941. cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
  7942. cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
  7943. cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
  7944. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
  7945. IPR_CANCEL_TIMEOUT);
  7946. rc = IPR_RC_JOB_RETURN;
  7947. ipr_cmd->job_step = ipr_reset_cancel_hcam;
  7948. break;
  7949. }
  7950. }
  7951. } else
  7952. ipr_cmd->job_step = ipr_reset_alert;
  7953. LEAVE;
  7954. return rc;
  7955. }
  7956. /**
  7957. * ipr_reset_ucode_download_done - Microcode download completion
  7958. * @ipr_cmd: ipr command struct
  7959. *
  7960. * Description: This function unmaps the microcode download buffer.
  7961. *
  7962. * Return value:
  7963. * IPR_RC_JOB_CONTINUE
  7964. **/
  7965. static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
  7966. {
  7967. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7968. struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
  7969. dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
  7970. sglist->num_sg, DMA_TO_DEVICE);
  7971. ipr_cmd->job_step = ipr_reset_alert;
  7972. return IPR_RC_JOB_CONTINUE;
  7973. }
  7974. /**
  7975. * ipr_reset_ucode_download - Download microcode to the adapter
  7976. * @ipr_cmd: ipr command struct
  7977. *
7978. * Description: This function checks to see if there is microcode
  7979. * to download to the adapter. If there is, a download is performed.
  7980. *
  7981. * Return value:
  7982. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  7983. **/
  7984. static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
  7985. {
  7986. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  7987. struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
  7988. ENTER;
  7989. ipr_cmd->job_step = ipr_reset_alert;
  7990. if (!sglist)
  7991. return IPR_RC_JOB_CONTINUE;
  7992. ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  7993. ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
  7994. ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
  7995. ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
  7996. ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
  7997. ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
  7998. ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
  7999. if (ioa_cfg->sis64)
  8000. ipr_build_ucode_ioadl64(ipr_cmd, sglist);
  8001. else
  8002. ipr_build_ucode_ioadl(ipr_cmd, sglist);
  8003. ipr_cmd->job_step = ipr_reset_ucode_download_done;
  8004. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
  8005. IPR_WRITE_BUFFER_TIMEOUT);
  8006. LEAVE;
  8007. return IPR_RC_JOB_RETURN;
  8008. }
  8009. /**
  8010. * ipr_reset_shutdown_ioa - Shutdown the adapter
  8011. * @ipr_cmd: ipr command struct
  8012. *
  8013. * Description: This function issues an adapter shutdown of the
  8014. * specified type to the specified adapter as part of the
  8015. * adapter reset job.
  8016. *
  8017. * Return value:
  8018. * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
  8019. **/
  8020. static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
  8021. {
  8022. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  8023. enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
  8024. unsigned long timeout;
  8025. int rc = IPR_RC_JOB_CONTINUE;
  8026. ENTER;
  8027. if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
  8028. ipr_cmd->job_step = ipr_reset_cancel_hcam;
  8029. else if (shutdown_type != IPR_SHUTDOWN_NONE &&
  8030. !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
  8031. ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
  8032. ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  8033. ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
  8034. ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
  8035. if (shutdown_type == IPR_SHUTDOWN_NORMAL)
  8036. timeout = IPR_SHUTDOWN_TIMEOUT;
  8037. else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
  8038. timeout = IPR_INTERNAL_TIMEOUT;
  8039. else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
  8040. timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
  8041. else
  8042. timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
  8043. ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
  8044. rc = IPR_RC_JOB_RETURN;
  8045. ipr_cmd->job_step = ipr_reset_ucode_download;
  8046. } else
  8047. ipr_cmd->job_step = ipr_reset_alert;
  8048. LEAVE;
  8049. return rc;
  8050. }
  8051. /**
  8052. * ipr_reset_ioa_job - Adapter reset job
  8053. * @ipr_cmd: ipr command struct
  8054. *
  8055. * Description: This function is the job router for the adapter reset job.
  8056. *
  8057. * Return value:
  8058. * none
  8059. **/
  8060. static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
  8061. {
  8062. u32 rc, ioasc;
  8063. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  8064. do {
  8065. ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
  8066. if (ioa_cfg->reset_cmd != ipr_cmd) {
  8067. /*
  8068. * We are doing nested adapter resets and this is
  8069. * not the current reset job.
  8070. */
  8071. list_add_tail(&ipr_cmd->queue,
  8072. &ipr_cmd->hrrq->hrrq_free_q);
  8073. return;
  8074. }
  8075. if (IPR_IOASC_SENSE_KEY(ioasc)) {
  8076. rc = ipr_cmd->job_step_failed(ipr_cmd);
  8077. if (rc == IPR_RC_JOB_RETURN)
  8078. return;
  8079. }
  8080. ipr_reinit_ipr_cmnd(ipr_cmd);
  8081. ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
  8082. rc = ipr_cmd->job_step(ipr_cmd);
  8083. } while (rc == IPR_RC_JOB_CONTINUE);
  8084. }
  8085. /**
  8086. * _ipr_initiate_ioa_reset - Initiate an adapter reset
  8087. * @ioa_cfg: ioa config struct
  8088. * @job_step: first job step of reset job
  8089. * @shutdown_type: shutdown type
  8090. *
  8091. * Description: This function will initiate the reset of the given adapter
  8092. * starting at the selected job step.
  8093. * If the caller needs to wait on the completion of the reset,
  8094. * the caller must sleep on the reset_wait_q.
  8095. *
  8096. * Return value:
  8097. * none
  8098. **/
  8099. static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
  8100. int (*job_step) (struct ipr_cmnd *),
  8101. enum ipr_shutdown_type shutdown_type)
  8102. {
  8103. struct ipr_cmnd *ipr_cmd;
  8104. int i;
  8105. ioa_cfg->in_reset_reload = 1;
  8106. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  8107. spin_lock(&ioa_cfg->hrrq[i]._lock);
  8108. ioa_cfg->hrrq[i].allow_cmds = 0;
  8109. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  8110. }
  8111. wmb();
  8112. if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
  8113. ioa_cfg->scsi_unblock = 0;
  8114. ioa_cfg->scsi_blocked = 1;
  8115. scsi_block_requests(ioa_cfg->host);
  8116. }
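/* Grab a free command block to carry the reset job and start the first job step. */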
  8117. ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
  8118. ioa_cfg->reset_cmd = ipr_cmd;
  8119. ipr_cmd->job_step = job_step;
  8120. ipr_cmd->u.shutdown_type = shutdown_type;
  8121. ipr_reset_ioa_job(ipr_cmd);
  8122. }
  8123. /**
  8124. * ipr_initiate_ioa_reset - Initiate an adapter reset
  8125. * @ioa_cfg: ioa config struct
  8126. * @shutdown_type: shutdown type
  8127. *
  8128. * Description: This function will initiate the reset of the given adapter.
  8129. * If the caller needs to wait on the completion of the reset,
  8130. * the caller must sleep on the reset_wait_q.
  8131. *
  8132. * Return value:
  8133. * none
  8134. **/
  8135. static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
  8136. enum ipr_shutdown_type shutdown_type)
  8137. {
  8138. int i;
  8139. if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
  8140. return;
  8141. if (ioa_cfg->in_reset_reload) {
  8142. if (ioa_cfg->sdt_state == GET_DUMP)
  8143. ioa_cfg->sdt_state = WAIT_FOR_DUMP;
  8144. else if (ioa_cfg->sdt_state == READ_DUMP)
  8145. ioa_cfg->sdt_state = ABORT_DUMP;
  8146. }
  8147. if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
  8148. dev_err(&ioa_cfg->pdev->dev,
  8149. "IOA taken offline - error recovery failed\n");
  8150. ioa_cfg->reset_retries = 0;
  8151. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  8152. spin_lock(&ioa_cfg->hrrq[i]._lock);
  8153. ioa_cfg->hrrq[i].ioa_is_dead = 1;
  8154. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  8155. }
  8156. wmb();
  8157. if (ioa_cfg->in_ioa_bringdown) {
  8158. ioa_cfg->reset_cmd = NULL;
  8159. ioa_cfg->in_reset_reload = 0;
  8160. ipr_fail_all_ops(ioa_cfg);
  8161. wake_up_all(&ioa_cfg->reset_wait_q);
  8162. if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
  8163. ioa_cfg->scsi_unblock = 1;
  8164. schedule_work(&ioa_cfg->work_q);
  8165. }
  8166. return;
  8167. } else {
  8168. ioa_cfg->in_ioa_bringdown = 1;
  8169. shutdown_type = IPR_SHUTDOWN_NONE;
  8170. }
  8171. }
  8172. _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
  8173. shutdown_type);
  8174. }
  8175. /**
  8176. * ipr_reset_freeze - Hold off all I/O activity
  8177. * @ipr_cmd: ipr command struct
  8178. *
  8179. * Description: If the PCI slot is frozen, hold off all I/O
  8180. * activity; then, as soon as the slot is available again,
  8181. * initiate an adapter reset.
  8182. */
  8183. static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
  8184. {
  8185. struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
  8186. int i;
  8187. /* Disallow new interrupts, avoid loop */
  8188. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  8189. spin_lock(&ioa_cfg->hrrq[i]._lock);
  8190. ioa_cfg->hrrq[i].allow_interrupts = 0;
  8191. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  8192. }
  8193. wmb();
  8194. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
  8195. ipr_cmd->done = ipr_reset_ioa_job;
  8196. return IPR_RC_JOB_RETURN;
  8197. }
  8198. /**
  8199. * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
  8200. * @pdev: PCI device struct
  8201. *
  8202. * Description: This routine is called to tell us that the MMIO
  8203. * access to the IOA has been restored
  8204. */
  8205. static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
  8206. {
  8207. unsigned long flags = 0;
  8208. struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
  8209. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  8210. if (!ioa_cfg->probe_done)
  8211. pci_save_state(pdev);
  8212. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  8213. return PCI_ERS_RESULT_NEED_RESET;
  8214. }
  8215. /**
  8216. * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
  8217. * @pdev: PCI device struct
  8218. *
  8219. * Description: This routine is called to tell us that the PCI bus
  8220. * is down. Can't do anything here, except put the device driver
  8221. * into a holding pattern, waiting for the PCI bus to come back.
  8222. */
  8223. static void ipr_pci_frozen(struct pci_dev *pdev)
  8224. {
  8225. unsigned long flags = 0;
  8226. struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
  8227. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  8228. if (ioa_cfg->probe_done)
  8229. _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
  8230. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  8231. }
  8232. /**
  8233. * ipr_pci_slot_reset - Called when PCI slot has been reset.
  8234. * @pdev: PCI device struct
  8235. *
  8236. * Description: This routine is called by the pci error recovery
  8237. * code after the PCI slot has been reset, just before we
  8238. * should resume normal operations.
  8239. */
  8240. static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
  8241. {
  8242. unsigned long flags = 0;
  8243. struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
  8244. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  8245. if (ioa_cfg->probe_done) {
  8246. if (ioa_cfg->needs_warm_reset)
  8247. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  8248. else
  8249. _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
  8250. IPR_SHUTDOWN_NONE);
  8251. } else
  8252. wake_up_all(&ioa_cfg->eeh_wait_q);
  8253. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  8254. return PCI_ERS_RESULT_RECOVERED;
  8255. }
  8256. /**
  8257. * ipr_pci_perm_failure - Called when PCI slot is dead for good.
  8258. * @pdev: PCI device struct
  8259. *
  8260. * Description: This routine is called when the PCI bus has
  8261. * permanently failed.
  8262. */
  8263. static void ipr_pci_perm_failure(struct pci_dev *pdev)
  8264. {
  8265. unsigned long flags = 0;
  8266. struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
  8267. int i;
  8268. spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
  8269. if (ioa_cfg->probe_done) {
  8270. if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
  8271. ioa_cfg->sdt_state = ABORT_DUMP;
  8272. ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
  8273. ioa_cfg->in_ioa_bringdown = 1;
  8274. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  8275. spin_lock(&ioa_cfg->hrrq[i]._lock);
  8276. ioa_cfg->hrrq[i].allow_cmds = 0;
  8277. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  8278. }
  8279. wmb();
  8280. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  8281. } else
  8282. wake_up_all(&ioa_cfg->eeh_wait_q);
  8283. spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
  8284. }
  8285. /**
  8286. * ipr_pci_error_detected - Called when a PCI error is detected.
  8287. * @pdev: PCI device struct
  8288. * @state: PCI channel state
  8289. *
  8290. * Description: Called when a PCI error is detected.
  8291. *
  8292. * Return value:
8293. * PCI_ERS_RESULT_CAN_RECOVER / PCI_ERS_RESULT_NEED_RESET / PCI_ERS_RESULT_DISCONNECT
  8294. */
  8295. static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
  8296. pci_channel_state_t state)
  8297. {
  8298. switch (state) {
  8299. case pci_channel_io_frozen:
  8300. ipr_pci_frozen(pdev);
  8301. return PCI_ERS_RESULT_CAN_RECOVER;
  8302. case pci_channel_io_perm_failure:
  8303. ipr_pci_perm_failure(pdev);
  8304. return PCI_ERS_RESULT_DISCONNECT;
  8305. default:
  8306. break;
  8307. }
  8308. return PCI_ERS_RESULT_NEED_RESET;
  8309. }
  8310. /**
  8311. * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
  8312. * @ioa_cfg: ioa cfg struct
  8313. *
8314. * Description: This is the second phase of adapter initialization.
8315. * This function takes care of initializing the adapter to the point
8316. * where it can accept new commands.
  8317. * Return value:
  8318. * 0 on success / -EIO on failure
  8319. **/
  8320. static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
  8321. {
  8322. int rc = 0;
  8323. unsigned long host_lock_flags = 0;
  8324. ENTER;
  8325. spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8326. dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
  8327. ioa_cfg->probe_done = 1;
  8328. if (ioa_cfg->needs_hard_reset) {
  8329. ioa_cfg->needs_hard_reset = 0;
  8330. ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
  8331. } else
  8332. _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
  8333. IPR_SHUTDOWN_NONE);
  8334. spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
  8335. LEAVE;
  8336. return rc;
  8337. }
  8338. /**
  8339. * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
  8340. * @ioa_cfg: ioa config struct
  8341. *
  8342. * Return value:
  8343. * none
  8344. **/
  8345. static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
  8346. {
  8347. int i;
  8348. if (ioa_cfg->ipr_cmnd_list) {
  8349. for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
  8350. if (ioa_cfg->ipr_cmnd_list[i])
  8351. dma_pool_free(ioa_cfg->ipr_cmd_pool,
  8352. ioa_cfg->ipr_cmnd_list[i],
  8353. ioa_cfg->ipr_cmnd_list_dma[i]);
  8354. ioa_cfg->ipr_cmnd_list[i] = NULL;
  8355. }
  8356. }
  8357. dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
  8358. kfree(ioa_cfg->ipr_cmnd_list);
  8359. kfree(ioa_cfg->ipr_cmnd_list_dma);
  8360. ioa_cfg->ipr_cmnd_list = NULL;
  8361. ioa_cfg->ipr_cmnd_list_dma = NULL;
  8362. ioa_cfg->ipr_cmd_pool = NULL;
  8363. }
  8364. /**
  8365. * ipr_free_mem - Frees memory allocated for an adapter
  8366. * @ioa_cfg: ioa cfg struct
  8367. *
  8368. * Return value:
  8369. * nothing
  8370. **/
  8371. static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
  8372. {
  8373. int i;
  8374. kfree(ioa_cfg->res_entries);
  8375. dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
  8376. ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
  8377. ipr_free_cmd_blks(ioa_cfg);
  8378. for (i = 0; i < ioa_cfg->hrrq_num; i++)
  8379. dma_free_coherent(&ioa_cfg->pdev->dev,
  8380. sizeof(u32) * ioa_cfg->hrrq[i].size,
  8381. ioa_cfg->hrrq[i].host_rrq,
  8382. ioa_cfg->hrrq[i].host_rrq_dma);
  8383. dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
  8384. ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
  8385. for (i = 0; i < IPR_MAX_HCAMS; i++) {
  8386. dma_free_coherent(&ioa_cfg->pdev->dev,
  8387. sizeof(struct ipr_hostrcb),
  8388. ioa_cfg->hostrcb[i],
  8389. ioa_cfg->hostrcb_dma[i]);
  8390. }
  8391. ipr_free_dump(ioa_cfg);
  8392. kfree(ioa_cfg->trace);
  8393. }
  8394. /**
  8395. * ipr_free_irqs - Free all allocated IRQs for the adapter.
  8396. * @ioa_cfg: ipr cfg struct
  8397. *
  8398. * This function frees all allocated IRQs for the
  8399. * specified adapter.
  8400. *
  8401. * Return value:
  8402. * none
  8403. **/
  8404. static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
  8405. {
  8406. struct pci_dev *pdev = ioa_cfg->pdev;
  8407. int i;
  8408. for (i = 0; i < ioa_cfg->nvectors; i++)
  8409. free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
  8410. pci_free_irq_vectors(pdev);
  8411. }
  8412. /**
  8413. * ipr_free_all_resources - Free all allocated resources for an adapter.
  8414. * @ioa_cfg: ioa config struct
  8415. *
  8416. * This function frees all allocated resources for the
  8417. * specified adapter.
  8418. *
  8419. * Return value:
  8420. * none
  8421. **/
  8422. static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
  8423. {
  8424. struct pci_dev *pdev = ioa_cfg->pdev;
  8425. ENTER;
  8426. ipr_free_irqs(ioa_cfg);
  8427. if (ioa_cfg->reset_work_q)
  8428. destroy_workqueue(ioa_cfg->reset_work_q);
  8429. iounmap(ioa_cfg->hdw_dma_regs);
  8430. pci_release_regions(pdev);
  8431. ipr_free_mem(ioa_cfg);
  8432. scsi_host_put(ioa_cfg->host);
  8433. pci_disable_device(pdev);
  8434. LEAVE;
  8435. }
  8436. /**
  8437. * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
  8438. * @ioa_cfg: ioa config struct
  8439. *
  8440. * Return value:
  8441. * 0 on success / -ENOMEM on allocation failure
  8442. **/
  8443. static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
  8444. {
  8445. struct ipr_cmnd *ipr_cmd;
  8446. struct ipr_ioarcb *ioarcb;
  8447. dma_addr_t dma_addr;
  8448. int i, entries_each_hrrq, hrrq_id = 0;
  8449. ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
  8450. sizeof(struct ipr_cmnd), 512, 0);
  8451. if (!ioa_cfg->ipr_cmd_pool)
  8452. return -ENOMEM;
  8453. ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
  8454. ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
  8455. if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
  8456. ipr_free_cmd_blks(ioa_cfg);
  8457. return -ENOMEM;
  8458. }
  8459. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  8460. if (ioa_cfg->hrrq_num > 1) {
  8461. if (i == 0) {
  8462. entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
  8463. ioa_cfg->hrrq[i].min_cmd_id = 0;
  8464. ioa_cfg->hrrq[i].max_cmd_id =
  8465. (entries_each_hrrq - 1);
  8466. } else {
  8467. entries_each_hrrq =
  8468. IPR_NUM_BASE_CMD_BLKS/
  8469. (ioa_cfg->hrrq_num - 1);
  8470. ioa_cfg->hrrq[i].min_cmd_id =
  8471. IPR_NUM_INTERNAL_CMD_BLKS +
  8472. (i - 1) * entries_each_hrrq;
  8473. ioa_cfg->hrrq[i].max_cmd_id =
  8474. (IPR_NUM_INTERNAL_CMD_BLKS +
  8475. i * entries_each_hrrq - 1);
  8476. }
  8477. } else {
  8478. entries_each_hrrq = IPR_NUM_CMD_BLKS;
  8479. ioa_cfg->hrrq[i].min_cmd_id = 0;
  8480. ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
  8481. }
  8482. ioa_cfg->hrrq[i].size = entries_each_hrrq;
  8483. }
  8484. BUG_ON(ioa_cfg->hrrq_num == 0);
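/* The per-HRRQ split above may leave a few of the IPR_NUM_CMD_BLKS unassigned; fold any remainder into the last HRRQ so every command block belongs to a queue. */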
  8485. i = IPR_NUM_CMD_BLKS -
  8486. ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
  8487. if (i > 0) {
  8488. ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
  8489. ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
  8490. }
  8491. for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
  8492. ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
  8493. GFP_KERNEL, &dma_addr);
  8494. if (!ipr_cmd) {
  8495. ipr_free_cmd_blks(ioa_cfg);
  8496. return -ENOMEM;
  8497. }
  8498. ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
  8499. ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
  8500. ioarcb = &ipr_cmd->ioarcb;
  8501. ipr_cmd->dma_addr = dma_addr;
  8502. if (ioa_cfg->sis64)
  8503. ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
  8504. else
  8505. ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
  8506. ioarcb->host_response_handle = cpu_to_be32(i << 2);
  8507. if (ioa_cfg->sis64) {
  8508. ioarcb->u.sis64_addr_data.data_ioadl_addr =
  8509. cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
  8510. ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
  8511. cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
  8512. } else {
  8513. ioarcb->write_ioadl_addr =
  8514. cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
  8515. ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
  8516. ioarcb->ioasa_host_pci_addr =
  8517. cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
  8518. }
  8519. ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
  8520. ipr_cmd->cmd_index = i;
  8521. ipr_cmd->ioa_cfg = ioa_cfg;
  8522. ipr_cmd->sense_buffer_dma = dma_addr +
  8523. offsetof(struct ipr_cmnd, sense_buffer);
  8524. ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
  8525. ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
  8526. list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
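/* Move to the next HRRQ once this queue's command id range is exhausted. */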
  8527. if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
  8528. hrrq_id++;
  8529. }
  8530. return 0;
  8531. }
  8532. /**
  8533. * ipr_alloc_mem - Allocate memory for an adapter
  8534. * @ioa_cfg: ioa config struct
  8535. *
  8536. * Return value:
  8537. * 0 on success / non-zero for error
  8538. **/
  8539. static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
  8540. {
  8541. struct pci_dev *pdev = ioa_cfg->pdev;
  8542. int i, rc = -ENOMEM;
  8543. ENTER;
  8544. ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
  8545. sizeof(struct ipr_resource_entry),
  8546. GFP_KERNEL);
  8547. if (!ioa_cfg->res_entries)
  8548. goto out;
  8549. for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
  8550. list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
  8551. ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
  8552. }
  8553. ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
  8554. sizeof(struct ipr_misc_cbs),
  8555. &ioa_cfg->vpd_cbs_dma,
  8556. GFP_KERNEL);
  8557. if (!ioa_cfg->vpd_cbs)
  8558. goto out_free_res_entries;
  8559. if (ipr_alloc_cmd_blks(ioa_cfg))
  8560. goto out_free_vpd_cbs;
  8561. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  8562. ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
  8563. sizeof(u32) * ioa_cfg->hrrq[i].size,
  8564. &ioa_cfg->hrrq[i].host_rrq_dma,
  8565. GFP_KERNEL);
  8566. if (!ioa_cfg->hrrq[i].host_rrq) {
  8567. while (--i >= 0)
  8568. dma_free_coherent(&pdev->dev,
  8569. sizeof(u32) * ioa_cfg->hrrq[i].size,
  8570. ioa_cfg->hrrq[i].host_rrq,
  8571. ioa_cfg->hrrq[i].host_rrq_dma);
  8572. goto out_ipr_free_cmd_blocks;
  8573. }
  8574. ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
  8575. }
  8576. ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
  8577. ioa_cfg->cfg_table_size,
  8578. &ioa_cfg->cfg_table_dma,
  8579. GFP_KERNEL);
  8580. if (!ioa_cfg->u.cfg_table)
  8581. goto out_free_host_rrq;
  8582. for (i = 0; i < IPR_MAX_HCAMS; i++) {
  8583. ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
  8584. sizeof(struct ipr_hostrcb),
  8585. &ioa_cfg->hostrcb_dma[i],
  8586. GFP_KERNEL);
  8587. if (!ioa_cfg->hostrcb[i])
  8588. goto out_free_hostrcb_dma;
  8589. ioa_cfg->hostrcb[i]->hostrcb_dma =
  8590. ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
  8591. ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
  8592. list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
  8593. }
  8594. ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
  8595. sizeof(struct ipr_trace_entry),
  8596. GFP_KERNEL);
  8597. if (!ioa_cfg->trace)
  8598. goto out_free_hostrcb_dma;
  8599. rc = 0;
  8600. out:
  8601. LEAVE;
  8602. return rc;
  8603. out_free_hostrcb_dma:
  8604. while (i-- > 0) {
  8605. dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
  8606. ioa_cfg->hostrcb[i],
  8607. ioa_cfg->hostrcb_dma[i]);
  8608. }
  8609. dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
  8610. ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
  8611. out_free_host_rrq:
  8612. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  8613. dma_free_coherent(&pdev->dev,
  8614. sizeof(u32) * ioa_cfg->hrrq[i].size,
  8615. ioa_cfg->hrrq[i].host_rrq,
  8616. ioa_cfg->hrrq[i].host_rrq_dma);
  8617. }
  8618. out_ipr_free_cmd_blocks:
  8619. ipr_free_cmd_blks(ioa_cfg);
  8620. out_free_vpd_cbs:
  8621. dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
  8622. ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
  8623. out_free_res_entries:
  8624. kfree(ioa_cfg->res_entries);
  8625. goto out;
  8626. }
  8627. /**
  8628. * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
  8629. * @ioa_cfg: ioa config struct
  8630. *
  8631. * Return value:
  8632. * none
  8633. **/
  8634. static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
  8635. {
  8636. int i;
  8637. for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
  8638. ioa_cfg->bus_attr[i].bus = i;
  8639. ioa_cfg->bus_attr[i].qas_enabled = 0;
  8640. ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
  8641. if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
  8642. ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
  8643. else
  8644. ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
  8645. }
  8646. }
  8647. /**
  8648. * ipr_init_regs - Initialize IOA registers
  8649. * @ioa_cfg: ioa config struct
  8650. *
  8651. * Return value:
  8652. * none
  8653. **/
  8654. static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
  8655. {
  8656. const struct ipr_interrupt_offsets *p;
  8657. struct ipr_interrupts *t;
  8658. void __iomem *base;
  8659. p = &ioa_cfg->chip_cfg->regs;
  8660. t = &ioa_cfg->regs;
  8661. base = ioa_cfg->hdw_dma_regs;
  8662. t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
  8663. t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
  8664. t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
  8665. t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
  8666. t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
  8667. t->clr_interrupt_reg = base + p->clr_interrupt_reg;
  8668. t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
  8669. t->sense_interrupt_reg = base + p->sense_interrupt_reg;
  8670. t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
  8671. t->ioarrin_reg = base + p->ioarrin_reg;
  8672. t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
  8673. t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
  8674. t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
  8675. t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
  8676. t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
  8677. t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
  8678. if (ioa_cfg->sis64) {
  8679. t->init_feedback_reg = base + p->init_feedback_reg;
  8680. t->dump_addr_reg = base + p->dump_addr_reg;
  8681. t->dump_data_reg = base + p->dump_data_reg;
  8682. t->endian_swap_reg = base + p->endian_swap_reg;
  8683. }
  8684. }
  8685. /**
  8686. * ipr_init_ioa_cfg - Initialize IOA config struct
  8687. * @ioa_cfg: ioa config struct
  8688. * @host: scsi host struct
  8689. * @pdev: PCI dev struct
  8690. *
  8691. * Return value:
  8692. * none
  8693. **/
  8694. static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
  8695. struct Scsi_Host *host, struct pci_dev *pdev)
  8696. {
  8697. int i;
  8698. ioa_cfg->host = host;
  8699. ioa_cfg->pdev = pdev;
  8700. ioa_cfg->log_level = ipr_log_level;
  8701. ioa_cfg->doorbell = IPR_DOORBELL;
  8702. sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
  8703. sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
  8704. sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
  8705. sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
  8706. sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
  8707. sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
  8708. INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
  8709. INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
  8710. INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
  8711. INIT_LIST_HEAD(&ioa_cfg->free_res_q);
  8712. INIT_LIST_HEAD(&ioa_cfg->used_res_q);
  8713. INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
  8714. INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
  8715. init_waitqueue_head(&ioa_cfg->reset_wait_q);
  8716. init_waitqueue_head(&ioa_cfg->msi_wait_q);
  8717. init_waitqueue_head(&ioa_cfg->eeh_wait_q);
  8718. ioa_cfg->sdt_state = INACTIVE;
  8719. ipr_initialize_bus_attr(ioa_cfg);
  8720. ioa_cfg->max_devs_supported = ipr_max_devs;
  8721. if (ioa_cfg->sis64) {
  8722. host->max_channel = IPR_MAX_SIS64_BUSES;
  8723. host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
  8724. host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
  8725. if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
  8726. ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
  8727. ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
  8728. + ((sizeof(struct ipr_config_table_entry64)
  8729. * ioa_cfg->max_devs_supported)));
  8730. } else {
  8731. host->max_channel = IPR_VSET_BUS;
  8732. host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
  8733. host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
  8734. if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
  8735. ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
  8736. ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
  8737. + ((sizeof(struct ipr_config_table_entry)
  8738. * ioa_cfg->max_devs_supported)));
  8739. }
  8740. host->unique_id = host->host_no;
  8741. host->max_cmd_len = IPR_MAX_CDB_LEN;
  8742. host->can_queue = ioa_cfg->max_cmds;
  8743. pci_set_drvdata(pdev, ioa_cfg);
  8744. for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
  8745. INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
  8746. INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
  8747. spin_lock_init(&ioa_cfg->hrrq[i]._lock);
  8748. if (i == 0)
  8749. ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
  8750. else
  8751. ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
  8752. }
  8753. }
  8754. /**
  8755. * ipr_get_chip_info - Find adapter chip information
  8756. * @dev_id: PCI device id struct
  8757. *
  8758. * Return value:
  8759. * ptr to chip information on success / NULL on failure
  8760. **/
  8761. static const struct ipr_chip_t *
  8762. ipr_get_chip_info(const struct pci_device_id *dev_id)
  8763. {
  8764. int i;
  8765. for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
  8766. if (ipr_chip[i].vendor == dev_id->vendor &&
  8767. ipr_chip[i].device == dev_id->device)
  8768. return &ipr_chip[i];
  8769. return NULL;
  8770. }
  8771. /**
  8772. * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
  8773. * during probe time
  8774. * @ioa_cfg: ioa config struct
  8775. *
  8776. * Return value:
  8777. * None
  8778. **/
  8779. static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
  8780. {
  8781. struct pci_dev *pdev = ioa_cfg->pdev;
  8782. if (pci_channel_offline(pdev)) {
  8783. wait_event_timeout(ioa_cfg->eeh_wait_q,
  8784. !pci_channel_offline(pdev),
  8785. IPR_PCI_ERROR_RECOVERY_TIMEOUT);
  8786. pci_restore_state(pdev);
  8787. }
  8788. }
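/**
* name_msi_vectors - Name the MSI/MSI-X vectors for this adapter
* @ioa_cfg: ioa config struct
*
* Description: Builds a "host<no>-<vector>" description string for each
* allocated interrupt vector. These strings are later used when the
* vector IRQs are requested.
*
* Return value:
* none
**/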
  8789. static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
  8790. {
  8791. int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
  8792. for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
  8793. snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
  8794. "host%d-%d", ioa_cfg->host->host_no, vec_idx);
  8795. ioa_cfg->vectors_info[vec_idx].
  8796. desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
  8797. }
  8798. }
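/**
* ipr_request_other_msi_irqs - Request IRQs for the remaining MSI/MSI-X vectors
* @ioa_cfg: ioa config struct
* @pdev: PCI device struct
*
* Description: Requests an interrupt for each vector after the first and
* ties it to its HRRQ. If a request fails, the IRQs already requested
* here are freed before returning.
*
* Return value:
* 0 on success / non-zero on failure
**/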
  8799. static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
  8800. struct pci_dev *pdev)
  8801. {
  8802. int i, rc;
  8803. for (i = 1; i < ioa_cfg->nvectors; i++) {
  8804. rc = request_irq(pci_irq_vector(pdev, i),
  8805. ipr_isr_mhrrq,
  8806. 0,
  8807. ioa_cfg->vectors_info[i].desc,
  8808. &ioa_cfg->hrrq[i]);
  8809. if (rc) {
  8810. while (--i > 0)
  8811. free_irq(pci_irq_vector(pdev, i),
  8812. &ioa_cfg->hrrq[i]);
  8813. return rc;
  8814. }
  8815. }
  8816. return 0;
  8817. }
  8818. /**
  8819. * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8820. * @irq: IRQ number
8821. * @devp: ioa config struct
  8822. *
8823. * Description: Simply sets the msi_received flag to 1 indicating that
8824. * Message Signaled Interrupts are supported.
8825. *
8826. * Return value:
8827. * IRQ_HANDLED
  8828. **/
  8829. static irqreturn_t ipr_test_intr(int irq, void *devp)
  8830. {
  8831. struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
  8832. unsigned long lock_flags = 0;
  8833. dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
  8834. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  8835. ioa_cfg->msi_received = 1;
  8836. wake_up(&ioa_cfg->msi_wait_q);
  8837. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  8838. return IRQ_HANDLED;
  8839. }
  8840. /**
  8841. * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
  8842. * @ioa_cfg: ioa config struct
  8843. * @pdev: PCI device struct
  8844. *
  8845. * Description: This routine sets up and initiates a test interrupt to determine
  8846. * if the interrupt is received via the ipr_test_intr() service routine.
8847. * If the test fails, the driver will fall back to LSI.
  8848. *
  8849. * Return value:
  8850. * 0 on success / non-zero on failure
  8851. **/
  8852. static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
  8853. {
  8854. int rc;
  8855. unsigned long lock_flags = 0;
  8856. int irq = pci_irq_vector(pdev, 0);
  8857. ENTER;
  8858. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  8859. init_waitqueue_head(&ioa_cfg->msi_wait_q);
  8860. ioa_cfg->msi_received = 0;
  8861. ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
  8862. writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
  8863. readl(ioa_cfg->regs.sense_interrupt_mask_reg);
  8864. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  8865. rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
  8866. if (rc) {
  8867. dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
  8868. return rc;
  8869. } else if (ipr_debug)
  8870. dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
  8871. writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
  8872. readl(ioa_cfg->regs.sense_interrupt_reg);
  8873. wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
  8874. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  8875. ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
  8876. if (!ioa_cfg->msi_received) {
  8877. /* MSI test failed */
  8878. dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
  8879. rc = -EOPNOTSUPP;
  8880. } else if (ipr_debug)
  8881. dev_info(&pdev->dev, "MSI test succeeded.\n");
  8882. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  8883. free_irq(irq, ioa_cfg);
  8884. LEAVE;
  8885. return rc;
  8886. }
8887. /**
* ipr_probe_ioa - Allocates memory and does first stage of initialization
  8888. * @pdev: PCI device struct
  8889. * @dev_id: PCI device id struct
  8890. *
  8891. * Return value:
  8892. * 0 on success / non-zero on failure
  8893. **/
  8894. static int ipr_probe_ioa(struct pci_dev *pdev,
  8895. const struct pci_device_id *dev_id)
  8896. {
  8897. struct ipr_ioa_cfg *ioa_cfg;
  8898. struct Scsi_Host *host;
  8899. unsigned long ipr_regs_pci;
  8900. void __iomem *ipr_regs;
  8901. int rc = PCIBIOS_SUCCESSFUL;
  8902. volatile u32 mask, uproc, interrupts;
  8903. unsigned long lock_flags, driver_lock_flags;
  8904. unsigned int irq_flag;
  8905. ENTER;
  8906. dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
  8907. host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
  8908. if (!host) {
  8909. dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
  8910. rc = -ENOMEM;
  8911. goto out;
  8912. }
  8913. ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
  8914. memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
  8915. ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
  8916. ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
  8917. if (!ioa_cfg->ipr_chip) {
  8918. dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
  8919. dev_id->vendor, dev_id->device);
rc = -ENODEV;
8920. goto out_scsi_host_put;
  8921. }
  8922. /* set SIS 32 or SIS 64 */
  8923. ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
  8924. ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
  8925. ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
  8926. ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
  8927. if (ipr_transop_timeout)
  8928. ioa_cfg->transop_timeout = ipr_transop_timeout;
  8929. else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
  8930. ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
  8931. else
  8932. ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
  8933. ioa_cfg->revid = pdev->revision;
  8934. ipr_init_ioa_cfg(ioa_cfg, host, pdev);
  8935. ipr_regs_pci = pci_resource_start(pdev, 0);
  8936. rc = pci_request_regions(pdev, IPR_NAME);
  8937. if (rc < 0) {
  8938. dev_err(&pdev->dev,
  8939. "Couldn't register memory range of registers\n");
  8940. goto out_scsi_host_put;
  8941. }
  8942. rc = pci_enable_device(pdev);
  8943. if (rc || pci_channel_offline(pdev)) {
  8944. if (pci_channel_offline(pdev)) {
  8945. ipr_wait_for_pci_err_recovery(ioa_cfg);
  8946. rc = pci_enable_device(pdev);
  8947. }
  8948. if (rc) {
  8949. dev_err(&pdev->dev, "Cannot enable adapter\n");
  8950. ipr_wait_for_pci_err_recovery(ioa_cfg);
  8951. goto out_release_regions;
  8952. }
  8953. }
  8954. ipr_regs = pci_ioremap_bar(pdev, 0);
  8955. if (!ipr_regs) {
  8956. dev_err(&pdev->dev,
  8957. "Couldn't map memory range of registers\n");
  8958. rc = -ENOMEM;
  8959. goto out_disable;
  8960. }
  8961. ioa_cfg->hdw_dma_regs = ipr_regs;
  8962. ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
  8963. ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
  8964. ipr_init_regs(ioa_cfg);
  8965. if (ioa_cfg->sis64) {
  8966. rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  8967. if (rc < 0) {
  8968. dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
  8969. rc = dma_set_mask_and_coherent(&pdev->dev,
  8970. DMA_BIT_MASK(32));
  8971. }
  8972. } else
  8973. rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  8974. if (rc < 0) {
  8975. dev_err(&pdev->dev, "Failed to set DMA mask\n");
  8976. goto cleanup_nomem;
  8977. }
  8978. rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
  8979. ioa_cfg->chip_cfg->cache_line_size);
  8980. if (rc != PCIBIOS_SUCCESSFUL) {
  8981. dev_err(&pdev->dev, "Write of cache line size failed\n");
  8982. ipr_wait_for_pci_err_recovery(ioa_cfg);
  8983. rc = -EIO;
  8984. goto cleanup_nomem;
  8985. }
  8986. /* Issue MMIO read to ensure card is not in EEH */
  8987. interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
  8988. ipr_wait_for_pci_err_recovery(ioa_cfg);
  8989. if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
  8990. dev_err(&pdev->dev, "The max number of MSIX is %d\n",
  8991. IPR_MAX_MSIX_VECTORS);
  8992. ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
  8993. }
  8994. irq_flag = PCI_IRQ_LEGACY;
  8995. if (ioa_cfg->ipr_chip->has_msi)
  8996. irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
  8997. rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
  8998. if (rc < 0) {
  8999. ipr_wait_for_pci_err_recovery(ioa_cfg);
  9000. goto cleanup_nomem;
  9001. }
  9002. ioa_cfg->nvectors = rc;
  9003. if (!pdev->msi_enabled && !pdev->msix_enabled)
  9004. ioa_cfg->clear_isr = 1;
  9005. pci_set_master(pdev);
  9006. if (pci_channel_offline(pdev)) {
  9007. ipr_wait_for_pci_err_recovery(ioa_cfg);
  9008. pci_set_master(pdev);
  9009. if (pci_channel_offline(pdev)) {
  9010. rc = -EIO;
  9011. goto out_msi_disable;
  9012. }
  9013. }
  9014. if (pdev->msi_enabled || pdev->msix_enabled) {
  9015. rc = ipr_test_msi(ioa_cfg, pdev);
  9016. switch (rc) {
  9017. case 0:
  9018. dev_info(&pdev->dev,
  9019. "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
  9020. pdev->msix_enabled ? "-X" : "");
  9021. break;
  9022. case -EOPNOTSUPP:
  9023. ipr_wait_for_pci_err_recovery(ioa_cfg);
  9024. pci_free_irq_vectors(pdev);
  9025. ioa_cfg->nvectors = 1;
  9026. ioa_cfg->clear_isr = 1;
  9027. break;
  9028. default:
  9029. goto out_msi_disable;
  9030. }
  9031. }
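/* Cap the number of HRRQs at the number of allocated vectors, online CPUs, and the driver maximum. */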
  9032. ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
  9033. (unsigned int)num_online_cpus(),
  9034. (unsigned int)IPR_MAX_HRRQ_NUM);
  9035. if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
  9036. goto out_msi_disable;
  9037. if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
  9038. goto out_msi_disable;
  9039. rc = ipr_alloc_mem(ioa_cfg);
  9040. if (rc < 0) {
  9041. dev_err(&pdev->dev,
  9042. "Couldn't allocate enough memory for device driver!\n");
  9043. goto out_msi_disable;
  9044. }
  9045. /* Save away PCI config space for use following IOA reset */
  9046. rc = pci_save_state(pdev);
  9047. if (rc != PCIBIOS_SUCCESSFUL) {
  9048. dev_err(&pdev->dev, "Failed to save PCI config space\n");
  9049. rc = -EIO;
  9050. goto cleanup_nolog;
  9051. }
  9052. /*
  9053. * If HRRQ updated interrupt is not masked, or reset alert is set,
  9054. * the card is in an unknown state and needs a hard reset
  9055. */
  9056. mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
  9057. interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
  9058. uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
  9059. if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
  9060. ioa_cfg->needs_hard_reset = 1;
  9061. if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
  9062. ioa_cfg->needs_hard_reset = 1;
  9063. if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
  9064. ioa_cfg->ioa_unit_checked = 1;
  9065. spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  9066. ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
  9067. spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
  9068. if (pdev->msi_enabled || pdev->msix_enabled) {
  9069. name_msi_vectors(ioa_cfg);
  9070. rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
  9071. ioa_cfg->vectors_info[0].desc,
  9072. &ioa_cfg->hrrq[0]);
  9073. if (!rc)
  9074. rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
  9075. } else {
  9076. rc = request_irq(pdev->irq, ipr_isr,
  9077. IRQF_SHARED,
  9078. IPR_NAME, &ioa_cfg->hrrq[0]);
  9079. }
  9080. if (rc) {
  9081. dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
  9082. pdev->irq, rc);
  9083. goto cleanup_nolog;
  9084. }
  9085. if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
  9086. (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
  9087. ioa_cfg->needs_warm_reset = 1;
  9088. ioa_cfg->reset = ipr_reset_slot_reset;
  9089. ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
  9090. WQ_MEM_RECLAIM, host->host_no);
  9091. if (!ioa_cfg->reset_work_q) {
  9092. dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
  9093. rc = -ENOMEM;
  9094. goto out_free_irq;
  9095. }
  9096. } else
  9097. ioa_cfg->reset = ipr_reset_start_bist;
  9098. spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
  9099. list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
  9100. spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
  9101. LEAVE;
  9102. out:
  9103. return rc;
  9104. out_free_irq:
  9105. ipr_free_irqs(ioa_cfg);
  9106. cleanup_nolog:
  9107. ipr_free_mem(ioa_cfg);
  9108. out_msi_disable:
  9109. ipr_wait_for_pci_err_recovery(ioa_cfg);
  9110. pci_free_irq_vectors(pdev);
  9111. cleanup_nomem:
  9112. iounmap(ipr_regs);
  9113. out_disable:
  9114. pci_disable_device(pdev);
  9115. out_release_regions:
  9116. pci_release_regions(pdev);
  9117. out_scsi_host_put:
  9118. scsi_host_put(host);
  9119. goto out;
  9120. }
  9121. /**
  9122. * ipr_initiate_ioa_bringdown - Bring down an adapter
  9123. * @ioa_cfg: ioa config struct
  9124. * @shutdown_type: shutdown type
  9125. *
  9126. * Description: This function will initiate bringing down the adapter.
  9127. * This consists of issuing an IOA shutdown to the adapter
  9128. * to flush the cache, and running BIST.
  9129. * If the caller needs to wait on the completion of the reset,
  9130. * the caller must sleep on the reset_wait_q.
  9131. *
  9132. * Return value:
  9133. * none
  9134. **/
  9135. static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
  9136. enum ipr_shutdown_type shutdown_type)
  9137. {
  9138. ENTER;
  9139. if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
  9140. ioa_cfg->sdt_state = ABORT_DUMP;
  9141. ioa_cfg->reset_retries = 0;
  9142. ioa_cfg->in_ioa_bringdown = 1;
  9143. ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
  9144. LEAVE;
  9145. }
  9146. /**
  9147. * __ipr_remove - Remove a single adapter
  9148. * @pdev: pci device struct
  9149. *
  9150. * Adapter hot plug remove entry point.
  9151. *
  9152. * Return value:
  9153. * none
  9154. **/
  9155. static void __ipr_remove(struct pci_dev *pdev)
  9156. {
  9157. unsigned long host_lock_flags = 0;
  9158. struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
  9159. int i;
  9160. unsigned long driver_lock_flags;
  9161. ENTER;
  9162. spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
  9163. while (ioa_cfg->in_reset_reload) {
  9164. spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
  9165. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  9166. spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
  9167. }
  9168. for (i = 0; i < ioa_cfg->hrrq_num; i++) {
  9169. spin_lock(&ioa_cfg->hrrq[i]._lock);
  9170. ioa_cfg->hrrq[i].removing_ioa = 1;
  9171. spin_unlock(&ioa_cfg->hrrq[i]._lock);
  9172. }
  9173. wmb();
  9174. ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
  9175. spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
  9176. wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
  9177. flush_work(&ioa_cfg->work_q);
  9178. if (ioa_cfg->reset_work_q)
  9179. flush_workqueue(ioa_cfg->reset_work_q);
  9180. INIT_LIST_HEAD(&ioa_cfg->used_res_q);
  9181. spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
  9182. spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
  9183. list_del(&ioa_cfg->queue);
  9184. spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
  9185. if (ioa_cfg->sdt_state == ABORT_DUMP)
  9186. ioa_cfg->sdt_state = WAIT_FOR_DUMP;
  9187. spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
  9188. ipr_free_all_resources(ioa_cfg);
  9189. LEAVE;
  9190. }
  9191. /**
  9192. * ipr_remove - IOA hot plug remove entry point
  9193. * @pdev: pci device struct
  9194. *
  9195. * Adapter hot plug remove entry point.
  9196. *
  9197. * Return value:
  9198. * none
  9199. **/
  9200. static void ipr_remove(struct pci_dev *pdev)
  9201. {
  9202. struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
  9203. ENTER;
  9204. ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
  9205. &ipr_trace_attr);
  9206. ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
  9207. &ipr_dump_attr);
  9208. sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
  9209. &ipr_ioa_async_err_log);
  9210. scsi_remove_host(ioa_cfg->host);
  9211. __ipr_remove(pdev);
  9212. LEAVE;
  9213. }
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:    pci device struct
 * @dev_id:  pci device ID
 *
 * Return value:
 *   0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
        struct ipr_ioa_cfg *ioa_cfg;
        unsigned long flags;
        int rc, i;

        rc = ipr_probe_ioa(pdev, dev_id);
        if (rc)
                return rc;

        ioa_cfg = pci_get_drvdata(pdev);
        rc = ipr_probe_ioa_part2(ioa_cfg);
        if (rc) {
                __ipr_remove(pdev);
                return rc;
        }

        rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
        if (rc) {
                __ipr_remove(pdev);
                return rc;
        }

        rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                   &ipr_trace_attr);
        if (rc) {
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }

        rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
                                   &ipr_ioa_async_err_log);
        if (rc) {
                /* Only the trace file exists at this point; the dump
                 * file has not been created yet, so it is not removed
                 * here. */
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }
        rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
                                  &ipr_dump_attr);
        if (rc) {
                sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_ioa_async_err_log);
                ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
                                      &ipr_trace_attr);
                scsi_remove_host(ioa_cfg->host);
                __ipr_remove(pdev);
                return rc;
        }

        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        ioa_cfg->scan_enabled = 1;
        schedule_work(&ioa_cfg->work_q);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                for (i = 1; i < ioa_cfg->hrrq_num; i++) {
                        irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
                                      ioa_cfg->iopoll_weight, ipr_iopoll);
                }
        }

        scsi_scan_host(ioa_cfg->host);

        return 0;
}
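/*
 * Each error path in ipr_probe() unwinds whatever has been set up so
 * far: the sysfs attributes created to that point are removed, the
 * SCSI host is unregistered, and __ipr_remove() handles the rest.
 * Once everything is in place, device scanning is enabled and the
 * work queue kicked under the host lock, irq_poll is initialized for
 * the secondary HRRQs on SIS-64 adapters with multiple vectors, and
 * the host is scanned for devices.
 */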
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:  pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It issues
 * a shutdown to the adapter to flush its write cache.
 *
 * Return value:
 *   none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
        unsigned long lock_flags = 0;
        enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
        int i;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                ioa_cfg->iopoll_weight = 0;
                for (i = 1; i < ioa_cfg->hrrq_num; i++)
                        irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
        }

        while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        }

        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
                shutdown_type = IPR_SHUTDOWN_QUIESCE;

        ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
        if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
                ipr_free_irqs(ioa_cfg);
                pci_disable_device(ioa_cfg->pdev);
        }
}
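/*
 * When the ipr_fast_reboot module parameter is set and the system is
 * restarting, SIS-64 adapters are only quiesced (IPR_SHUTDOWN_QUIESCE)
 * rather than fully brought down, and their interrupts and PCI device
 * are released here, presumably so a reboot does not have to wait for
 * a full adapter shutdown. All other cases use IPR_SHUTDOWN_NORMAL to
 * flush the write cache before the system goes down.
 */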
static struct pci_device_id ipr_pci_table[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
                IPR_USE_LONG_TRANSOP_TIMEOUT },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
        { }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
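/*
 * MODULE_DEVICE_TABLE() exports ipr_pci_table so the module tooling
 * can generate PCI aliases for it, letting udev/modprobe autoload
 * this driver when a matching adapter is discovered. The last field
 * of each entry is driver_data, used here for per-device quirk flags
 * such as IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET.
 */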
static const struct pci_error_handlers ipr_err_handler = {
        .error_detected = ipr_pci_error_detected,
        .mmio_enabled = ipr_pci_mmio_enabled,
        .slot_reset = ipr_pci_slot_reset,
};
static struct pci_driver ipr_driver = {
        .name = IPR_NAME,
        .id_table = ipr_pci_table,
        .probe = ipr_probe,
        .remove = ipr_remove,
        .shutdown = ipr_shutdown,
        .err_handler = &ipr_err_handler,
};
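/*
 * ipr_driver ties the PCI ID table above to the probe/remove/shutdown
 * entry points and to the AER error handlers in ipr_err_handler.
 * pci_register_driver() in ipr_init() below hands this structure to
 * the PCI core, which then invokes ipr_probe() for every device that
 * matches an entry in ipr_pci_table.
 */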
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:  ipr command struct
 *
 * Return value:
 *   none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
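/*
 * No status processing is done when the shutdown-prepare command
 * completes; ipr_halt_done() simply returns the command block to its
 * HRRQ free queue.
 */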
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:     Notifier block
 * @event:  Notifier event
 * @buf:    Notifier data (unused)
 *
 * Return value:
 *   NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioa_cfg *ioa_cfg;
        unsigned long flags = 0, driver_lock_flags;

        if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
                return NOTIFY_DONE;

        spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

        list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
                if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
                    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                        continue;
                }

                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
                ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
                ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

                ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
        }
        spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

        return NOTIFY_OK;
}
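/*
 * ipr_halt() runs from the reboot notifier chain on restart, halt and
 * power-off. For every registered adapter that is still accepting
 * commands it issues an IPR_IOA_SHUTDOWN "prepare for normal" command.
 * Adapters that are not accepting commands, and SIS-64 adapters on the
 * fast-reboot restart path (which ipr_shutdown() quiesces instead),
 * are skipped.
 */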
static struct notifier_block ipr_notifier = {
        .notifier_call = ipr_halt,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *   0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
        int rc;

        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

        register_reboot_notifier(&ipr_notifier);
        rc = pci_register_driver(&ipr_driver);
        if (rc) {
                unregister_reboot_notifier(&ipr_notifier);
                return rc;
        }

        return 0;
}
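/*
 * The reboot notifier is registered before the PCI driver, presumably
 * so that ipr_halt() is in place by the time any adapter can be
 * probed. If pci_register_driver() fails, the notifier registration
 * is undone before the error is returned; ipr_exit() below unwinds in
 * the same order.
 */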
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *   none
 **/
static void __exit ipr_exit(void)
{
        unregister_reboot_notifier(&ipr_notifier);
        pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);