// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2003-2013 LSI Corporation
 *  Copyright (c) 2013-2016 Avago Technologies
 *  Copyright (c) 2016-2018 Broadcom Inc.
 *
 *  Authors: Broadcom Inc.
 *           Sreenivas Bagalkote
 *           Sumant Patro
 *           Bo Yang
 *           Adam Radford
 *           Kashyap Desai <kashyap.desai@broadcom.com>
 *           Sumit Saxena <sumit.saxena@broadcom.com>
 *
 *  Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
#include <linux/blk-mq-pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
	"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues\n\t\t"
	"1 - iops: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is enabled on all queues\n\t\t"
	"2 - latency: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is disabled on all queues\n\t\t"
	"default mode is 'balanced'"
	);

int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");

unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");

int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
	"It is not applicable for MFI_SERIES. &\n\t\t"
	"Driver will work in latency mode. &\n\t\t"
	"High iops queues are not allocated &\n\t\t"
	);

int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
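
/*
 * Illustrative note (not part of the upstream driver source): most of the
 * parameters above are registered with permission 0444, so they are set at
 * module load time rather than changed at runtime, e.g. (hypothetical values):
 *
 *	modprobe megaraid_sas msix_vectors=16 perf_mode=0 scmd_timeout=90
 *
 * event_log_level is registered with 0644 and can also be updated later
 * through /sys/module/megaraid_sas/parameters/event_log_level.
 */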
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
				struct scsi_device *sdev);
static void
megasas_set_ld_removed_by_fw(struct megasas_instance *instance);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	/* VENTURA */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);
static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;

/* define lock for aen poll */
static DEFINE_SPINLOCK(poll_aen_lock);

extern struct dentry *megasas_debugfs_root;
extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);
u32 megasas_readl(struct megasas_instance *instance,
		  const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;
	/*
	 * Due to a HW errata in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return valid value. As a workaround in driver, retry readl for
	 * up to thirty times until a non-zero value is read.
	 */
	if (instance->adapter_type == AERO_SERIES) {
		do {
			ret_val = readl(addr);
			i++;
		} while (ret_val == 0 && i < 30);
		return ret_val;
	} else {
		return readl(addr);
	}
}
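
/*
 * Illustrative usage (not part of the upstream source): on Aero adapters,
 * register reads that must not observe the transient all-zero value go
 * through the wrapper above instead of a bare readl(), e.g.:
 *
 *	fw_state = megasas_readl(instance,
 *				 &instance->reg_set->outbound_scratch_pad_0);
 */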
/**
 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
 * @instance:	Adapter soft state
 * @dcmd:	DCMD frame inside MFI command
 * @dma_addr:	DMA address of buffer to be passed to FW
 * @dma_len:	Length of DMA buffer to be passed to FW
 * @return:	void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
			      struct megasas_dcmd_frame *dcmd,
			      dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

	} else {
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}
static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return;
}

/**
 * megasas_get_cmd - Get a command from the free pool
 * @instance:	Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
				    *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}
/**
 * megasas_return_cmd - Return a cmd to free command pool
 * @instance:	Adapter soft state
 * @cmd:	Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}
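
/*
 * Illustrative sketch (not part of the upstream source): the typical life
 * cycle of an internal MFI command built on the helpers above. Names such
 * as buf_h/buf_len are placeholders; real callers (the DCMD issuers later
 * in this file) also go through megasas_issue_blocked_cmd() to wait for
 * firmware completion.
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	dcmd = &cmd->frame->dcmd;
 *	megasas_set_dma_settings(instance, dcmd, buf_h, buf_len);
 *	megasas_issue_dcmd(instance, cmd);
 *	... wait for the firmware to complete the frame ...
 *	megasas_return_cmd(instance, cmd);
 */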
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}
/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance:	Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

	if (class_locale.members.class >= event_log_level)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			le32_to_cpu(evt_detail->seq_num),
			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			(class_locale.members.locale),
			format_class(class_locale.members.class),
			evt_detail->description);

	if (megasas_dbg_lvl & LD_PD_DEBUG)
		dev_info(&instance->pdev->dev,
			 "evt_detail.args.ld.target_id/index %d/%d\n",
			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
}
/*
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @instance:	Adapter soft state
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @instance:	Adapter soft state
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}
/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:	Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}

/**
 * megasas_clear_intr_xscale - Check & clear interrupt
 * @instance:	Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}
/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @instance:		Adapter soft state
 * @frame_phys_addr :	Physical address of cmd
 * @frame_count :	Number of frames for the command
 * @regs :		MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @instance:	Adapter soft state
 * @regs:	MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
			 struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				       MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				      MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					       MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}
  522. /**
  523. * megasas_check_reset_xscale - For controller reset check
  524. * @instance: Adapter soft state
  525. * @regs: MFI register set
  526. */
  527. static int
  528. megasas_check_reset_xscale(struct megasas_instance *instance,
  529. struct megasas_register_set __iomem *regs)
  530. {
  531. if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
  532. (le32_to_cpu(*instance->consumer) ==
  533. MEGASAS_ADPRESET_INPROG_SIGN))
  534. return 1;
  535. return 0;
  536. }
  537. static struct megasas_instance_template megasas_instance_template_xscale = {
  538. .fire_cmd = megasas_fire_cmd_xscale,
  539. .enable_intr = megasas_enable_intr_xscale,
  540. .disable_intr = megasas_disable_intr_xscale,
  541. .clear_intr = megasas_clear_intr_xscale,
  542. .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
  543. .adp_reset = megasas_adp_reset_xscale,
  544. .check_reset = megasas_check_reset_xscale,
  545. .service_isr = megasas_isr,
  546. .tasklet = megasas_complete_cmd_dpc,
  547. .init_adapter = megasas_init_adapter_mfi,
  548. .build_and_issue_cmd = megasas_build_and_issue_cmd,
  549. .issue_dcmd = megasas_issue_dcmd,
  550. };
  551. /*
552. * This is the end of the set of functions & definitions specific
553. * to xscale (device id: 1064R, PERC5) controllers
  554. */
  555. /*
  556. * The following functions are defined for ppc (deviceid : 0x60)
  557. * controllers
  558. */
  559. /**
  560. * megasas_enable_intr_ppc - Enables interrupts
  561. * @instance: Adapter soft state
  562. */
  563. static inline void
  564. megasas_enable_intr_ppc(struct megasas_instance *instance)
  565. {
  566. struct megasas_register_set __iomem *regs;
  567. regs = instance->reg_set;
  568. writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
  569. writel(~0x80000000, &(regs)->outbound_intr_mask);
  570. /* Dummy readl to force pci flush */
  571. readl(&regs->outbound_intr_mask);
  572. }
  573. /**
574. * megasas_disable_intr_ppc - Disables interrupt
  575. * @instance: Adapter soft state
  576. */
  577. static inline void
  578. megasas_disable_intr_ppc(struct megasas_instance *instance)
  579. {
  580. struct megasas_register_set __iomem *regs;
  581. u32 mask = 0xFFFFFFFF;
  582. regs = instance->reg_set;
  583. writel(mask, &regs->outbound_intr_mask);
  584. /* Dummy readl to force pci flush */
  585. readl(&regs->outbound_intr_mask);
  586. }
  587. /**
  588. * megasas_read_fw_status_reg_ppc - returns the current FW status value
  589. * @instance: Adapter soft state
  590. */
  591. static u32
  592. megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
  593. {
  594. return readl(&instance->reg_set->outbound_scratch_pad_0);
  595. }
  596. /**
  597. * megasas_clear_intr_ppc - Check & clear interrupt
  598. * @instance: Adapter soft state
  599. */
  600. static int
  601. megasas_clear_intr_ppc(struct megasas_instance *instance)
  602. {
  603. u32 status, mfiStatus = 0;
  604. struct megasas_register_set __iomem *regs;
  605. regs = instance->reg_set;
  606. /*
  607. * Check if it is our interrupt
  608. */
  609. status = readl(&regs->outbound_intr_status);
  610. if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
  611. mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
  612. if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
  613. mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
  614. /*
  615. * Clear the interrupt by writing back the same value
  616. */
  617. writel(status, &regs->outbound_doorbell_clear);
  618. /* Dummy readl to force pci flush */
  619. readl(&regs->outbound_doorbell_clear);
  620. return mfiStatus;
  621. }
  622. /**
  623. * megasas_fire_cmd_ppc - Sends command to the FW
  624. * @instance: Adapter soft state
  625. * @frame_phys_addr: Physical address of cmd
  626. * @frame_count: Number of frames for the command
  627. * @regs: MFI register set
  628. */
  629. static inline void
  630. megasas_fire_cmd_ppc(struct megasas_instance *instance,
  631. dma_addr_t frame_phys_addr,
  632. u32 frame_count,
  633. struct megasas_register_set __iomem *regs)
  634. {
  635. unsigned long flags;
  636. spin_lock_irqsave(&instance->hba_lock, flags);
  637. writel((frame_phys_addr | (frame_count<<1))|1,
  638. &(regs)->inbound_queue_port);
  639. spin_unlock_irqrestore(&instance->hba_lock, flags);
  640. }
  641. /**
  642. * megasas_check_reset_ppc - For controller reset check
  643. * @instance: Adapter soft state
  644. * @regs: MFI register set
  645. */
  646. static int
  647. megasas_check_reset_ppc(struct megasas_instance *instance,
  648. struct megasas_register_set __iomem *regs)
  649. {
  650. if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
  651. return 1;
  652. return 0;
  653. }
  654. static struct megasas_instance_template megasas_instance_template_ppc = {
  655. .fire_cmd = megasas_fire_cmd_ppc,
  656. .enable_intr = megasas_enable_intr_ppc,
  657. .disable_intr = megasas_disable_intr_ppc,
  658. .clear_intr = megasas_clear_intr_ppc,
  659. .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
  660. .adp_reset = megasas_adp_reset_xscale,
  661. .check_reset = megasas_check_reset_ppc,
  662. .service_isr = megasas_isr,
  663. .tasklet = megasas_complete_cmd_dpc,
  664. .init_adapter = megasas_init_adapter_mfi,
  665. .build_and_issue_cmd = megasas_build_and_issue_cmd,
  666. .issue_dcmd = megasas_issue_dcmd,
  667. };
  668. /**
  669. * megasas_enable_intr_skinny - Enables interrupts
  670. * @instance: Adapter soft state
  671. */
  672. static inline void
  673. megasas_enable_intr_skinny(struct megasas_instance *instance)
  674. {
  675. struct megasas_register_set __iomem *regs;
  676. regs = instance->reg_set;
  677. writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
  678. writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
  679. /* Dummy readl to force pci flush */
  680. readl(&regs->outbound_intr_mask);
  681. }
  682. /**
  683. * megasas_disable_intr_skinny - Disables interrupt
  684. * @instance: Adapter soft state
  685. */
  686. static inline void
  687. megasas_disable_intr_skinny(struct megasas_instance *instance)
  688. {
  689. struct megasas_register_set __iomem *regs;
  690. u32 mask = 0xFFFFFFFF;
  691. regs = instance->reg_set;
  692. writel(mask, &regs->outbound_intr_mask);
  693. /* Dummy readl to force pci flush */
  694. readl(&regs->outbound_intr_mask);
  695. }
  696. /**
  697. * megasas_read_fw_status_reg_skinny - returns the current FW status value
  698. * @instance: Adapter soft state
  699. */
  700. static u32
  701. megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
  702. {
  703. return readl(&instance->reg_set->outbound_scratch_pad_0);
  704. }
  705. /**
  706. * megasas_clear_intr_skinny - Check & clear interrupt
  707. * @instance: Adapter soft state
  708. */
  709. static int
  710. megasas_clear_intr_skinny(struct megasas_instance *instance)
  711. {
  712. u32 status;
  713. u32 mfiStatus = 0;
  714. struct megasas_register_set __iomem *regs;
  715. regs = instance->reg_set;
  716. /*
  717. * Check if it is our interrupt
  718. */
  719. status = readl(&regs->outbound_intr_status);
  720. if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
  721. return 0;
  722. }
  723. /*
  724. * Check if it is our interrupt
  725. */
  726. if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
  727. MFI_STATE_FAULT) {
  728. mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
  729. } else
  730. mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
  731. /*
  732. * Clear the interrupt by writing back the same value
  733. */
  734. writel(status, &regs->outbound_intr_status);
  735. /*
  736. * dummy read to flush PCI
  737. */
  738. readl(&regs->outbound_intr_status);
  739. return mfiStatus;
  740. }
  741. /**
  742. * megasas_fire_cmd_skinny - Sends command to the FW
  743. * @instance: Adapter soft state
  744. * @frame_phys_addr: Physical address of cmd
  745. * @frame_count: Number of frames for the command
  746. * @regs: MFI register set
  747. */
  748. static inline void
  749. megasas_fire_cmd_skinny(struct megasas_instance *instance,
  750. dma_addr_t frame_phys_addr,
  751. u32 frame_count,
  752. struct megasas_register_set __iomem *regs)
  753. {
  754. unsigned long flags;
  755. spin_lock_irqsave(&instance->hba_lock, flags);
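/*
 * Editor's note: skinny controllers take a 64-bit frame address split across
 * two ports; the upper 32 bits go to the high queue port, while the lower
 * 32 bits are written with the frame count shifted into bit 1 and bit 0 set,
 * mirroring the encoding used by the ppc/gen2 fire_cmd paths.
 */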
  756. writel(upper_32_bits(frame_phys_addr),
  757. &(regs)->inbound_high_queue_port);
  758. writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
  759. &(regs)->inbound_low_queue_port);
  760. spin_unlock_irqrestore(&instance->hba_lock, flags);
  761. }
  762. /**
  763. * megasas_check_reset_skinny - For controller reset check
  764. * @instance: Adapter soft state
  765. * @regs: MFI register set
  766. */
  767. static int
  768. megasas_check_reset_skinny(struct megasas_instance *instance,
  769. struct megasas_register_set __iomem *regs)
  770. {
  771. if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
  772. return 1;
  773. return 0;
  774. }
  775. static struct megasas_instance_template megasas_instance_template_skinny = {
  776. .fire_cmd = megasas_fire_cmd_skinny,
  777. .enable_intr = megasas_enable_intr_skinny,
  778. .disable_intr = megasas_disable_intr_skinny,
  779. .clear_intr = megasas_clear_intr_skinny,
  780. .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
  781. .adp_reset = megasas_adp_reset_gen2,
  782. .check_reset = megasas_check_reset_skinny,
  783. .service_isr = megasas_isr,
  784. .tasklet = megasas_complete_cmd_dpc,
  785. .init_adapter = megasas_init_adapter_mfi,
  786. .build_and_issue_cmd = megasas_build_and_issue_cmd,
  787. .issue_dcmd = megasas_issue_dcmd,
  788. };
  789. /*
  790. * The following functions are defined for gen2 (deviceid : 0x78 0x79)
  791. * controllers
  792. */
  793. /**
  794. * megasas_enable_intr_gen2 - Enables interrupts
  795. * @instance: Adapter soft state
  796. */
  797. static inline void
  798. megasas_enable_intr_gen2(struct megasas_instance *instance)
  799. {
  800. struct megasas_register_set __iomem *regs;
  801. regs = instance->reg_set;
  802. writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
803. /* write ~0x00000005 (bit values 4 and 1) to the intr mask */
  804. writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
  805. /* Dummy readl to force pci flush */
  806. readl(&regs->outbound_intr_mask);
  807. }
  808. /**
  809. * megasas_disable_intr_gen2 - Disables interrupt
  810. * @instance: Adapter soft state
  811. */
  812. static inline void
  813. megasas_disable_intr_gen2(struct megasas_instance *instance)
  814. {
  815. struct megasas_register_set __iomem *regs;
  816. u32 mask = 0xFFFFFFFF;
  817. regs = instance->reg_set;
  818. writel(mask, &regs->outbound_intr_mask);
  819. /* Dummy readl to force pci flush */
  820. readl(&regs->outbound_intr_mask);
  821. }
  822. /**
  823. * megasas_read_fw_status_reg_gen2 - returns the current FW status value
  824. * @instance: Adapter soft state
  825. */
  826. static u32
  827. megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
  828. {
  829. return readl(&instance->reg_set->outbound_scratch_pad_0);
  830. }
  831. /**
  832. * megasas_clear_intr_gen2 - Check & clear interrupt
  833. * @instance: Adapter soft state
  834. */
  835. static int
  836. megasas_clear_intr_gen2(struct megasas_instance *instance)
  837. {
  838. u32 status;
  839. u32 mfiStatus = 0;
  840. struct megasas_register_set __iomem *regs;
  841. regs = instance->reg_set;
  842. /*
  843. * Check if it is our interrupt
  844. */
  845. status = readl(&regs->outbound_intr_status);
  846. if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
  847. mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
  848. }
  849. if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
  850. mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
  851. }
  852. /*
  853. * Clear the interrupt by writing back the same value
  854. */
  855. if (mfiStatus)
  856. writel(status, &regs->outbound_doorbell_clear);
  857. /* Dummy readl to force pci flush */
  858. readl(&regs->outbound_intr_status);
  859. return mfiStatus;
  860. }
  861. /**
  862. * megasas_fire_cmd_gen2 - Sends command to the FW
  863. * @instance: Adapter soft state
  864. * @frame_phys_addr: Physical address of cmd
  865. * @frame_count: Number of frames for the command
  866. * @regs: MFI register set
  867. */
  868. static inline void
  869. megasas_fire_cmd_gen2(struct megasas_instance *instance,
  870. dma_addr_t frame_phys_addr,
  871. u32 frame_count,
  872. struct megasas_register_set __iomem *regs)
  873. {
  874. unsigned long flags;
  875. spin_lock_irqsave(&instance->hba_lock, flags);
  876. writel((frame_phys_addr | (frame_count<<1))|1,
  877. &(regs)->inbound_queue_port);
  878. spin_unlock_irqrestore(&instance->hba_lock, flags);
  879. }
  880. /**
  881. * megasas_adp_reset_gen2 - For controller reset
  882. * @instance: Adapter soft state
  883. * @reg_set: MFI register set
  884. */
  885. static int
  886. megasas_adp_reset_gen2(struct megasas_instance *instance,
  887. struct megasas_register_set __iomem *reg_set)
  888. {
  889. u32 retry = 0 ;
  890. u32 HostDiag;
  891. u32 __iomem *seq_offset = &reg_set->seq_offset;
  892. u32 __iomem *hostdiag_offset = &reg_set->host_diag;
  893. if (instance->instancet == &megasas_instance_template_skinny) {
  894. seq_offset = &reg_set->fusion_seq_offset;
  895. hostdiag_offset = &reg_set->fusion_host_diag;
  896. }
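/*
 * Editor's note: the ordered writes below (0, 4, 0xb, 2, 7, 0xd) appear to be
 * the write-enable key sequence for the host diagnostic register; the first
 * poll loop waits for DIAG_WRITE_ENABLE before DIAG_RESET_ADAPTER is set, and
 * the second loop waits for that bit to clear, signalling reset completion.
 */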
  897. writel(0, seq_offset);
  898. writel(4, seq_offset);
  899. writel(0xb, seq_offset);
  900. writel(2, seq_offset);
  901. writel(7, seq_offset);
  902. writel(0xd, seq_offset);
  903. msleep(1000);
  904. HostDiag = (u32)readl(hostdiag_offset);
  905. while (!(HostDiag & DIAG_WRITE_ENABLE)) {
  906. msleep(100);
  907. HostDiag = (u32)readl(hostdiag_offset);
  908. dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
  909. retry, HostDiag);
  910. if (retry++ >= 100)
  911. return 1;
  912. }
  913. dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
  914. writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
  915. ssleep(10);
  916. HostDiag = (u32)readl(hostdiag_offset);
  917. while (HostDiag & DIAG_RESET_ADAPTER) {
  918. msleep(100);
  919. HostDiag = (u32)readl(hostdiag_offset);
  920. dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
  921. retry, HostDiag);
  922. if (retry++ >= 1000)
  923. return 1;
  924. }
  925. return 0;
  926. }
  927. /**
  928. * megasas_check_reset_gen2 - For controller reset check
  929. * @instance: Adapter soft state
  930. * @regs: MFI register set
  931. */
  932. static int
  933. megasas_check_reset_gen2(struct megasas_instance *instance,
  934. struct megasas_register_set __iomem *regs)
  935. {
  936. if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
  937. return 1;
  938. return 0;
  939. }
  940. static struct megasas_instance_template megasas_instance_template_gen2 = {
  941. .fire_cmd = megasas_fire_cmd_gen2,
  942. .enable_intr = megasas_enable_intr_gen2,
  943. .disable_intr = megasas_disable_intr_gen2,
  944. .clear_intr = megasas_clear_intr_gen2,
  945. .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
  946. .adp_reset = megasas_adp_reset_gen2,
  947. .check_reset = megasas_check_reset_gen2,
  948. .service_isr = megasas_isr,
  949. .tasklet = megasas_complete_cmd_dpc,
  950. .init_adapter = megasas_init_adapter_mfi,
  951. .build_and_issue_cmd = megasas_build_and_issue_cmd,
  952. .issue_dcmd = megasas_issue_dcmd,
  953. };
  954. /*
955. * This is the end of the set of functions & definitions
956. * specific to gen2 (device id: 0x78, 0x79) controllers
  957. */
  958. /*
  959. * Template added for TB (Fusion)
  960. */
  961. extern struct megasas_instance_template megasas_instance_template_fusion;
  962. /**
  963. * megasas_issue_polled - Issues a polling command
  964. * @instance: Adapter soft state
  965. * @cmd: Command packet to be issued
  966. *
  967. * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
  968. */
  969. int
  970. megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
  971. {
  972. struct megasas_header *frame_hdr = &cmd->frame->hdr;
  973. frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
  974. frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
  975. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  976. dev_err(&instance->pdev->dev, "Failed from %s %d\n",
  977. __func__, __LINE__);
  978. return DCMD_INIT;
  979. }
  980. instance->instancet->issue_dcmd(instance, cmd);
  981. return wait_and_poll(instance, cmd, instance->requestorId ?
  982. MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
  983. }
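/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * a typical polled DCMD caller is assumed to look roughly like this, with
 * <OPCODE> standing in for a real MR_DCMD_* value:
 *
 *	struct megasas_cmd *cmd = megasas_get_cmd(instance);
 *	struct megasas_dcmd_frame *dcmd = &cmd->frame->dcmd;
 *
 *	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
 *	dcmd->opcode = cpu_to_le32(<OPCODE>);
 *	(set up flags, data_xfer_len and the SGL as required by the opcode)
 *	if (megasas_issue_polled(instance, cmd) != DCMD_SUCCESS)
 *		dev_err(&instance->pdev->dev, "polled DCMD failed\n");
 *	megasas_return_cmd(instance, cmd);
 */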
  984. /**
  985. * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
  986. * @instance: Adapter soft state
  987. * @cmd: Command to be issued
  988. * @timeout: Timeout in seconds
  989. *
  990. * This function waits on an event for the command to be returned from ISR.
  991. * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
  992. * Used to issue ioctl commands.
  993. */
  994. int
  995. megasas_issue_blocked_cmd(struct megasas_instance *instance,
  996. struct megasas_cmd *cmd, int timeout)
  997. {
  998. int ret = 0;
  999. cmd->cmd_status_drv = DCMD_INIT;
  1000. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  1001. dev_err(&instance->pdev->dev, "Failed from %s %d\n",
  1002. __func__, __LINE__);
  1003. return DCMD_INIT;
  1004. }
  1005. instance->instancet->issue_dcmd(instance, cmd);
  1006. if (timeout) {
  1007. ret = wait_event_timeout(instance->int_cmd_wait_q,
  1008. cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
  1009. if (!ret) {
  1010. dev_err(&instance->pdev->dev,
  1011. "DCMD(opcode: 0x%x) is timed out, func:%s\n",
  1012. cmd->frame->dcmd.opcode, __func__);
  1013. return DCMD_TIMEOUT;
  1014. }
  1015. } else
  1016. wait_event(instance->int_cmd_wait_q,
  1017. cmd->cmd_status_drv != DCMD_INIT);
  1018. return cmd->cmd_status_drv;
  1019. }
  1020. /**
  1021. * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
  1022. * @instance: Adapter soft state
  1023. * @cmd_to_abort: Previously issued cmd to be aborted
  1024. * @timeout: Timeout in seconds
  1025. *
1026. * MFI firmware can abort a previously issued AEN command (automatic event
  1027. * notification). The megasas_issue_blocked_abort_cmd() issues such abort
  1028. * cmd and waits for return status.
  1029. * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
  1030. */
  1031. static int
  1032. megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
  1033. struct megasas_cmd *cmd_to_abort, int timeout)
  1034. {
  1035. struct megasas_cmd *cmd;
  1036. struct megasas_abort_frame *abort_fr;
  1037. int ret = 0;
  1038. u32 opcode;
  1039. cmd = megasas_get_cmd(instance);
  1040. if (!cmd)
  1041. return -1;
  1042. abort_fr = &cmd->frame->abort;
  1043. /*
  1044. * Prepare and issue the abort frame
  1045. */
  1046. abort_fr->cmd = MFI_CMD_ABORT;
  1047. abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
  1048. abort_fr->flags = cpu_to_le16(0);
  1049. abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
  1050. abort_fr->abort_mfi_phys_addr_lo =
  1051. cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
  1052. abort_fr->abort_mfi_phys_addr_hi =
  1053. cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
  1054. cmd->sync_cmd = 1;
  1055. cmd->cmd_status_drv = DCMD_INIT;
  1056. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  1057. dev_err(&instance->pdev->dev, "Failed from %s %d\n",
  1058. __func__, __LINE__);
  1059. return DCMD_INIT;
  1060. }
  1061. instance->instancet->issue_dcmd(instance, cmd);
  1062. if (timeout) {
  1063. ret = wait_event_timeout(instance->abort_cmd_wait_q,
  1064. cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
  1065. if (!ret) {
  1066. opcode = cmd_to_abort->frame->dcmd.opcode;
  1067. dev_err(&instance->pdev->dev,
  1068. "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
  1069. opcode, __func__);
  1070. return DCMD_TIMEOUT;
  1071. }
  1072. } else
  1073. wait_event(instance->abort_cmd_wait_q,
  1074. cmd->cmd_status_drv != DCMD_INIT);
  1075. cmd->sync_cmd = 0;
  1076. megasas_return_cmd(instance, cmd);
  1077. return cmd->cmd_status_drv;
  1078. }
  1079. /**
  1080. * megasas_make_sgl32 - Prepares 32-bit SGL
  1081. * @instance: Adapter soft state
  1082. * @scp: SCSI command from the mid-layer
  1083. * @mfi_sgl: SGL to be filled in
  1084. *
  1085. * If successful, this function returns the number of SG elements. Otherwise,
1086. * it returns -1.
  1087. */
  1088. static int
  1089. megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
  1090. union megasas_sgl *mfi_sgl)
  1091. {
  1092. int i;
  1093. int sge_count;
  1094. struct scatterlist *os_sgl;
  1095. sge_count = scsi_dma_map(scp);
  1096. BUG_ON(sge_count < 0);
  1097. if (sge_count) {
  1098. scsi_for_each_sg(scp, os_sgl, sge_count, i) {
  1099. mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
  1100. mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
  1101. }
  1102. }
  1103. return sge_count;
  1104. }
  1105. /**
  1106. * megasas_make_sgl64 - Prepares 64-bit SGL
  1107. * @instance: Adapter soft state
  1108. * @scp: SCSI command from the mid-layer
  1109. * @mfi_sgl: SGL to be filled in
  1110. *
  1111. * If successful, this function returns the number of SG elements. Otherwise,
1112. * it returns -1.
  1113. */
  1114. static int
  1115. megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
  1116. union megasas_sgl *mfi_sgl)
  1117. {
  1118. int i;
  1119. int sge_count;
  1120. struct scatterlist *os_sgl;
  1121. sge_count = scsi_dma_map(scp);
  1122. BUG_ON(sge_count < 0);
  1123. if (sge_count) {
  1124. scsi_for_each_sg(scp, os_sgl, sge_count, i) {
  1125. mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
  1126. mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
  1127. }
  1128. }
  1129. return sge_count;
  1130. }
  1131. /**
  1132. * megasas_make_sgl_skinny - Prepares IEEE SGL
  1133. * @instance: Adapter soft state
  1134. * @scp: SCSI command from the mid-layer
  1135. * @mfi_sgl: SGL to be filled in
  1136. *
  1137. * If successful, this function returns the number of SG elements. Otherwise,
1138. * it returns -1.
  1139. */
  1140. static int
  1141. megasas_make_sgl_skinny(struct megasas_instance *instance,
  1142. struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
  1143. {
  1144. int i;
  1145. int sge_count;
  1146. struct scatterlist *os_sgl;
  1147. sge_count = scsi_dma_map(scp);
  1148. if (sge_count) {
  1149. scsi_for_each_sg(scp, os_sgl, sge_count, i) {
  1150. mfi_sgl->sge_skinny[i].length =
  1151. cpu_to_le32(sg_dma_len(os_sgl));
  1152. mfi_sgl->sge_skinny[i].phys_addr =
  1153. cpu_to_le64(sg_dma_address(os_sgl));
  1154. mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
  1155. }
  1156. }
  1157. return sge_count;
  1158. }
  1159. /**
  1160. * megasas_get_frame_count - Computes the number of frames
1161. * @instance: Adapter soft state
1162. * @sge_count: number of sg elements
1163. * @frame_type: type of frame - io or pthru frame
1164. * Returns the number of frames required for the given number of SGEs (sge_count)
  1165. */
  1166. static u32 megasas_get_frame_count(struct megasas_instance *instance,
  1167. u8 sge_count, u8 frame_type)
  1168. {
  1169. int num_cnt;
  1170. int sge_bytes;
  1171. u32 sge_sz;
  1172. u32 frame_count = 0;
  1173. sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
  1174. sizeof(struct megasas_sge32);
  1175. if (instance->flag_ieee) {
  1176. sge_sz = sizeof(struct megasas_sge_skinny);
  1177. }
  1178. /*
  1179. * Main frame can contain 2 SGEs for 64-bit SGLs and
  1180. * 3 SGEs for 32-bit SGLs for ldio &
  1181. * 1 SGEs for 64-bit SGLs and
  1182. * 2 SGEs for 32-bit SGLs for pthru frame
  1183. */
  1184. if (unlikely(frame_type == PTHRU_FRAME)) {
  1185. if (instance->flag_ieee == 1) {
  1186. num_cnt = sge_count - 1;
  1187. } else if (IS_DMA64)
  1188. num_cnt = sge_count - 1;
  1189. else
  1190. num_cnt = sge_count - 2;
  1191. } else {
  1192. if (instance->flag_ieee == 1) {
  1193. num_cnt = sge_count - 1;
  1194. } else if (IS_DMA64)
  1195. num_cnt = sge_count - 2;
  1196. else
  1197. num_cnt = sge_count - 3;
  1198. }
  1199. if (num_cnt > 0) {
  1200. sge_bytes = sge_sz * num_cnt;
  1201. frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
  1202. ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
  1203. }
  1204. /* Main frame */
  1205. frame_count += 1;
  1206. if (frame_count > 7)
  1207. frame_count = 8;
  1208. return frame_count;
  1209. }
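/*
 * Worked example (editor's illustration): for an LDIO with 64-bit SGLs and
 * sge_count = 10, the main frame already carries 2 SGEs, so num_cnt = 8; the
 * remaining 8 SGEs occupy sge_sz * 8 bytes, which is rounded up to whole
 * MEGAMFI_FRAME_SIZE frames, and the main frame adds one more.  The total is
 * capped at 8 frames.
 */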
  1210. /**
  1211. * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
  1212. * @instance: Adapter soft state
  1213. * @scp: SCSI command
  1214. * @cmd: Command to be prepared in
  1215. *
1216. * This function prepares CDB commands. These are typically pass-through
  1217. * commands to the devices.
  1218. */
  1219. static int
  1220. megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
  1221. struct megasas_cmd *cmd)
  1222. {
  1223. u32 is_logical;
  1224. u32 device_id;
  1225. u16 flags = 0;
  1226. struct megasas_pthru_frame *pthru;
  1227. is_logical = MEGASAS_IS_LOGICAL(scp->device);
  1228. device_id = MEGASAS_DEV_INDEX(scp);
  1229. pthru = (struct megasas_pthru_frame *)cmd->frame;
  1230. if (scp->sc_data_direction == DMA_TO_DEVICE)
  1231. flags = MFI_FRAME_DIR_WRITE;
  1232. else if (scp->sc_data_direction == DMA_FROM_DEVICE)
  1233. flags = MFI_FRAME_DIR_READ;
  1234. else if (scp->sc_data_direction == DMA_NONE)
  1235. flags = MFI_FRAME_DIR_NONE;
  1236. if (instance->flag_ieee == 1) {
  1237. flags |= MFI_FRAME_IEEE;
  1238. }
  1239. /*
  1240. * Prepare the DCDB frame
  1241. */
  1242. pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
  1243. pthru->cmd_status = 0x0;
  1244. pthru->scsi_status = 0x0;
  1245. pthru->target_id = device_id;
  1246. pthru->lun = scp->device->lun;
  1247. pthru->cdb_len = scp->cmd_len;
  1248. pthru->timeout = 0;
  1249. pthru->pad_0 = 0;
  1250. pthru->flags = cpu_to_le16(flags);
  1251. pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
  1252. memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
  1253. /*
  1254. * If the command is for the tape device, set the
  1255. * pthru timeout to the os layer timeout value.
  1256. */
  1257. if (scp->device->type == TYPE_TAPE) {
  1258. if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
  1259. pthru->timeout = cpu_to_le16(0xFFFF);
  1260. else
  1261. pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
  1262. }
  1263. /*
  1264. * Construct SGL
  1265. */
  1266. if (instance->flag_ieee == 1) {
  1267. pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
  1268. pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
  1269. &pthru->sgl);
  1270. } else if (IS_DMA64) {
  1271. pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
  1272. pthru->sge_count = megasas_make_sgl64(instance, scp,
  1273. &pthru->sgl);
  1274. } else
  1275. pthru->sge_count = megasas_make_sgl32(instance, scp,
  1276. &pthru->sgl);
  1277. if (pthru->sge_count > instance->max_num_sge) {
  1278. dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
  1279. pthru->sge_count);
  1280. return 0;
  1281. }
  1282. /*
  1283. * Sense info specific
  1284. */
  1285. pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
  1286. pthru->sense_buf_phys_addr_hi =
  1287. cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
  1288. pthru->sense_buf_phys_addr_lo =
  1289. cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
  1290. /*
  1291. * Compute the total number of frames this command consumes. FW uses
  1292. * this number to pull sufficient number of frames from host memory.
  1293. */
  1294. cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
  1295. PTHRU_FRAME);
  1296. return cmd->frame_count;
  1297. }
  1298. /**
  1299. * megasas_build_ldio - Prepares IOs to logical devices
  1300. * @instance: Adapter soft state
  1301. * @scp: SCSI command
  1302. * @cmd: Command to be prepared
  1303. *
  1304. * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
  1305. */
  1306. static int
  1307. megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
  1308. struct megasas_cmd *cmd)
  1309. {
  1310. u32 device_id;
  1311. u8 sc = scp->cmnd[0];
  1312. u16 flags = 0;
  1313. struct megasas_io_frame *ldio;
  1314. device_id = MEGASAS_DEV_INDEX(scp);
  1315. ldio = (struct megasas_io_frame *)cmd->frame;
  1316. if (scp->sc_data_direction == DMA_TO_DEVICE)
  1317. flags = MFI_FRAME_DIR_WRITE;
  1318. else if (scp->sc_data_direction == DMA_FROM_DEVICE)
  1319. flags = MFI_FRAME_DIR_READ;
  1320. if (instance->flag_ieee == 1) {
  1321. flags |= MFI_FRAME_IEEE;
  1322. }
  1323. /*
  1324. * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
  1325. */
  1326. ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
  1327. ldio->cmd_status = 0x0;
  1328. ldio->scsi_status = 0x0;
  1329. ldio->target_id = device_id;
  1330. ldio->timeout = 0;
  1331. ldio->reserved_0 = 0;
  1332. ldio->pad_0 = 0;
  1333. ldio->flags = cpu_to_le16(flags);
  1334. ldio->start_lba_hi = 0;
  1335. ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
  1336. /*
  1337. * 6-byte READ(0x08) or WRITE(0x0A) cdb
  1338. */
  1339. if (scp->cmd_len == 6) {
  1340. ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
  1341. ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
  1342. ((u32) scp->cmnd[2] << 8) |
  1343. (u32) scp->cmnd[3]);
  1344. ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
  1345. }
  1346. /*
  1347. * 10-byte READ(0x28) or WRITE(0x2A) cdb
  1348. */
  1349. else if (scp->cmd_len == 10) {
  1350. ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
  1351. ((u32) scp->cmnd[7] << 8));
  1352. ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
  1353. ((u32) scp->cmnd[3] << 16) |
  1354. ((u32) scp->cmnd[4] << 8) |
  1355. (u32) scp->cmnd[5]);
  1356. }
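/*
 * Editor's illustration for the 10-byte decode above: a READ(10) CDB of
 * 28 00 00 12 34 56 00 00 08 00 yields start_lba_lo = 0x00123456 and
 * lba_count = 8.
 */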
  1357. /*
  1358. * 12-byte READ(0xA8) or WRITE(0xAA) cdb
  1359. */
  1360. else if (scp->cmd_len == 12) {
  1361. ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
  1362. ((u32) scp->cmnd[7] << 16) |
  1363. ((u32) scp->cmnd[8] << 8) |
  1364. (u32) scp->cmnd[9]);
  1365. ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
  1366. ((u32) scp->cmnd[3] << 16) |
  1367. ((u32) scp->cmnd[4] << 8) |
  1368. (u32) scp->cmnd[5]);
  1369. }
  1370. /*
  1371. * 16-byte READ(0x88) or WRITE(0x8A) cdb
  1372. */
  1373. else if (scp->cmd_len == 16) {
  1374. ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
  1375. ((u32) scp->cmnd[11] << 16) |
  1376. ((u32) scp->cmnd[12] << 8) |
  1377. (u32) scp->cmnd[13]);
  1378. ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
  1379. ((u32) scp->cmnd[7] << 16) |
  1380. ((u32) scp->cmnd[8] << 8) |
  1381. (u32) scp->cmnd[9]);
  1382. ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
  1383. ((u32) scp->cmnd[3] << 16) |
  1384. ((u32) scp->cmnd[4] << 8) |
  1385. (u32) scp->cmnd[5]);
  1386. }
  1387. /*
  1388. * Construct SGL
  1389. */
  1390. if (instance->flag_ieee) {
  1391. ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
  1392. ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
  1393. &ldio->sgl);
  1394. } else if (IS_DMA64) {
  1395. ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
  1396. ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
  1397. } else
  1398. ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
  1399. if (ldio->sge_count > instance->max_num_sge) {
  1400. dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
  1401. ldio->sge_count);
  1402. return 0;
  1403. }
  1404. /*
  1405. * Sense info specific
  1406. */
  1407. ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
  1408. ldio->sense_buf_phys_addr_hi = 0;
  1409. ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
  1410. /*
  1411. * Compute the total number of frames this command consumes. FW uses
  1412. * this number to pull sufficient number of frames from host memory.
  1413. */
  1414. cmd->frame_count = megasas_get_frame_count(instance,
  1415. ldio->sge_count, IO_FRAME);
  1416. return cmd->frame_count;
  1417. }
  1418. /**
  1419. * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1420. * and whether it is RW or non-RW
  1421. * @cmd: SCSI command
  1422. *
  1423. */
  1424. inline int megasas_cmd_type(struct scsi_cmnd *cmd)
  1425. {
  1426. int ret;
  1427. switch (cmd->cmnd[0]) {
  1428. case READ_10:
  1429. case WRITE_10:
  1430. case READ_12:
  1431. case WRITE_12:
  1432. case READ_6:
  1433. case WRITE_6:
  1434. case READ_16:
  1435. case WRITE_16:
  1436. ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
  1437. READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
  1438. break;
  1439. default:
  1440. ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
  1441. NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
  1442. }
  1443. return ret;
  1444. }
  1445. /**
  1446. * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
  1447. * in FW
  1448. * @instance: Adapter soft state
  1449. */
  1450. static inline void
  1451. megasas_dump_pending_frames(struct megasas_instance *instance)
  1452. {
  1453. struct megasas_cmd *cmd;
  1454. int i,n;
  1455. union megasas_sgl *mfi_sgl;
  1456. struct megasas_io_frame *ldio;
  1457. struct megasas_pthru_frame *pthru;
  1458. u32 sgcount;
  1459. u16 max_cmd = instance->max_fw_cmds;
  1460. dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
  1461. dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
  1462. if (IS_DMA64)
  1463. dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
  1464. else
  1465. dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
  1466. dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
  1467. for (i = 0; i < max_cmd; i++) {
  1468. cmd = instance->cmd_list[i];
  1469. if (!cmd->scmd)
  1470. continue;
  1471. dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
  1472. if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
  1473. ldio = (struct megasas_io_frame *)cmd->frame;
  1474. mfi_sgl = &ldio->sgl;
  1475. sgcount = ldio->sge_count;
  1476. dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
  1477. " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
  1478. instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
  1479. le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
  1480. le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
  1481. } else {
  1482. pthru = (struct megasas_pthru_frame *) cmd->frame;
  1483. mfi_sgl = &pthru->sgl;
  1484. sgcount = pthru->sge_count;
  1485. dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
  1486. "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
  1487. instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
  1488. pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
  1489. le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
  1490. }
  1491. if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
  1492. for (n = 0; n < sgcount; n++) {
  1493. if (IS_DMA64)
  1494. dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
  1495. le32_to_cpu(mfi_sgl->sge64[n].length),
  1496. le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
  1497. else
  1498. dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
  1499. le32_to_cpu(mfi_sgl->sge32[n].length),
  1500. le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
  1501. }
  1502. }
  1503. } /*for max_cmd*/
  1504. dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
  1505. for (i = 0; i < max_cmd; i++) {
  1506. cmd = instance->cmd_list[i];
  1507. if (cmd->sync_cmd == 1)
  1508. dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
  1509. }
  1510. dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
  1511. }
  1512. u32
  1513. megasas_build_and_issue_cmd(struct megasas_instance *instance,
  1514. struct scsi_cmnd *scmd)
  1515. {
  1516. struct megasas_cmd *cmd;
  1517. u32 frame_count;
  1518. cmd = megasas_get_cmd(instance);
  1519. if (!cmd)
  1520. return SCSI_MLQUEUE_HOST_BUSY;
  1521. /*
  1522. * Logical drive command
  1523. */
  1524. if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
  1525. frame_count = megasas_build_ldio(instance, scmd, cmd);
  1526. else
  1527. frame_count = megasas_build_dcdb(instance, scmd, cmd);
  1528. if (!frame_count)
  1529. goto out_return_cmd;
  1530. cmd->scmd = scmd;
  1531. megasas_priv(scmd)->cmd_priv = cmd;
  1532. /*
  1533. * Issue the command to the FW
  1534. */
  1535. atomic_inc(&instance->fw_outstanding);
  1536. instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
  1537. cmd->frame_count-1, instance->reg_set);
  1538. return 0;
  1539. out_return_cmd:
  1540. megasas_return_cmd(instance, cmd);
  1541. return SCSI_MLQUEUE_HOST_BUSY;
  1542. }
  1543. /**
  1544. * megasas_queue_command - Queue entry point
  1545. * @shost: adapter SCSI host
  1546. * @scmd: SCSI command to be queued
  1547. */
  1548. static int
  1549. megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
  1550. {
  1551. struct megasas_instance *instance;
  1552. struct MR_PRIV_DEVICE *mr_device_priv_data;
  1553. u32 ld_tgt_id;
  1554. instance = (struct megasas_instance *)
  1555. scmd->device->host->hostdata;
  1556. if (instance->unload == 1) {
  1557. scmd->result = DID_NO_CONNECT << 16;
  1558. scsi_done(scmd);
  1559. return 0;
  1560. }
  1561. if (instance->issuepend_done == 0)
  1562. return SCSI_MLQUEUE_HOST_BUSY;
  1563. /* Check for an mpio path and adjust behavior */
  1564. if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
  1565. if (megasas_check_mpio_paths(instance, scmd) ==
  1566. (DID_REQUEUE << 16)) {
  1567. return SCSI_MLQUEUE_HOST_BUSY;
  1568. } else {
  1569. scmd->result = DID_NO_CONNECT << 16;
  1570. scsi_done(scmd);
  1571. return 0;
  1572. }
  1573. }
  1574. mr_device_priv_data = scmd->device->hostdata;
  1575. if (!mr_device_priv_data ||
  1576. (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
  1577. scmd->result = DID_NO_CONNECT << 16;
  1578. scsi_done(scmd);
  1579. return 0;
  1580. }
  1581. if (MEGASAS_IS_LOGICAL(scmd->device)) {
  1582. ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
  1583. if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
  1584. scmd->result = DID_NO_CONNECT << 16;
  1585. scsi_done(scmd);
  1586. return 0;
  1587. }
  1588. }
  1589. if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
  1590. return SCSI_MLQUEUE_HOST_BUSY;
  1591. if (mr_device_priv_data->tm_busy)
  1592. return SCSI_MLQUEUE_DEVICE_BUSY;
  1593. scmd->result = 0;
  1594. if (MEGASAS_IS_LOGICAL(scmd->device) &&
  1595. (scmd->device->id >= instance->fw_supported_vd_count ||
  1596. scmd->device->lun)) {
  1597. scmd->result = DID_BAD_TARGET << 16;
  1598. goto out_done;
  1599. }
  1600. if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
  1601. MEGASAS_IS_LOGICAL(scmd->device) &&
  1602. (!instance->fw_sync_cache_support)) {
  1603. scmd->result = DID_OK << 16;
  1604. goto out_done;
  1605. }
  1606. return instance->instancet->build_and_issue_cmd(instance, scmd);
  1607. out_done:
  1608. scsi_done(scmd);
  1609. return 0;
  1610. }
  1611. static struct megasas_instance *megasas_lookup_instance(u16 host_no)
  1612. {
  1613. int i;
  1614. for (i = 0; i < megasas_mgmt_info.max_index; i++) {
  1615. if ((megasas_mgmt_info.instance[i]) &&
  1616. (megasas_mgmt_info.instance[i]->host->host_no == host_no))
  1617. return megasas_mgmt_info.instance[i];
  1618. }
  1619. return NULL;
  1620. }
  1621. /*
  1622. * megasas_set_dynamic_target_properties -
1623. * Device properties set by the driver may not be static and need to be
1624. * updated after OCR
  1625. *
  1626. * set tm_capable.
  1627. * set dma alignment (only for eedp protection enable vd).
  1628. *
  1629. * @sdev: OS provided scsi device
  1630. *
  1631. * Returns void
  1632. */
  1633. void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
  1634. bool is_target_prop)
  1635. {
  1636. u16 pd_index = 0, ld;
  1637. u32 device_id;
  1638. struct megasas_instance *instance;
  1639. struct fusion_context *fusion;
  1640. struct MR_PRIV_DEVICE *mr_device_priv_data;
  1641. struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
  1642. struct MR_LD_RAID *raid;
  1643. struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
  1644. instance = megasas_lookup_instance(sdev->host->host_no);
  1645. fusion = instance->ctrl_context;
  1646. mr_device_priv_data = sdev->hostdata;
  1647. if (!fusion || !mr_device_priv_data)
  1648. return;
  1649. if (MEGASAS_IS_LOGICAL(sdev)) {
  1650. device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
  1651. + sdev->id;
  1652. local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
  1653. ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
  1654. if (ld >= instance->fw_supported_vd_count)
  1655. return;
  1656. raid = MR_LdRaidGet(ld, local_map_ptr);
  1657. if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
  1658. blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
  1659. mr_device_priv_data->is_tm_capable =
  1660. raid->capability.tmCapable;
  1661. if (!raid->flags.isEPD)
  1662. sdev->no_write_same = 1;
  1663. } else if (instance->use_seqnum_jbod_fp) {
  1664. pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
  1665. sdev->id;
  1666. pd_sync = (void *)fusion->pd_seq_sync
  1667. [(instance->pd_seq_map_id - 1) & 1];
  1668. mr_device_priv_data->is_tm_capable =
  1669. pd_sync->seq[pd_index].capability.tmCapable;
  1670. }
  1671. if (is_target_prop && instance->tgt_prop->reset_tmo) {
  1672. /*
  1673. * If FW provides a target reset timeout value, driver will use
  1674. * it. If not set, fallback to default values.
  1675. */
  1676. mr_device_priv_data->target_reset_tmo =
  1677. min_t(u8, instance->max_reset_tmo,
  1678. instance->tgt_prop->reset_tmo);
  1679. mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
  1680. } else {
  1681. mr_device_priv_data->target_reset_tmo =
  1682. MEGASAS_DEFAULT_TM_TIMEOUT;
  1683. mr_device_priv_data->task_abort_tmo =
  1684. MEGASAS_DEFAULT_TM_TIMEOUT;
  1685. }
  1686. }
  1687. /*
  1688. * megasas_set_nvme_device_properties -
  1689. * set nomerges=2
  1690. * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
  1691. * set maximum io transfer = MDTS of NVME device provided by MR firmware.
  1692. *
1693. * MR firmware provides the value in KB. The caller of this function converts
1694. * KB into bytes.
1695. *
1696. * e.g. MDTS=5 means 2^5 * NVMe page size. For a 4K page size,
1697. * MR firmware provides the value 128, since 32 * 4K = 128K.
  1698. *
  1699. * @sdev: scsi device
  1700. * @max_io_size: maximum io transfer size
  1701. *
  1702. */
  1703. static inline void
  1704. megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
  1705. {
  1706. struct megasas_instance *instance;
  1707. u32 mr_nvme_pg_size;
  1708. instance = (struct megasas_instance *)sdev->host->hostdata;
  1709. mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
  1710. MR_DEFAULT_NVME_PAGE_SIZE);
  1711. blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
  1712. blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
  1713. blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
  1714. }
  1715. /*
  1716. * megasas_set_fw_assisted_qd -
1717. * set device queue depth to the interface default or FW-assisted value,
1718. * or to can_queue when enable_sdev_max_qd is set
  1719. *
  1720. * @sdev: scsi device
1721. * @is_target_prop: true if FW provided target properties.
  1722. */
  1723. static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
  1724. bool is_target_prop)
  1725. {
  1726. u8 interface_type;
  1727. u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
  1728. u32 tgt_device_qd;
  1729. struct megasas_instance *instance;
  1730. struct MR_PRIV_DEVICE *mr_device_priv_data;
  1731. instance = megasas_lookup_instance(sdev->host->host_no);
  1732. mr_device_priv_data = sdev->hostdata;
  1733. interface_type = mr_device_priv_data->interface_type;
  1734. switch (interface_type) {
  1735. case SAS_PD:
  1736. device_qd = MEGASAS_SAS_QD;
  1737. break;
  1738. case SATA_PD:
  1739. device_qd = MEGASAS_SATA_QD;
  1740. break;
  1741. case NVME_PD:
  1742. device_qd = MEGASAS_NVME_QD;
  1743. break;
  1744. }
  1745. if (is_target_prop) {
  1746. tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
  1747. if (tgt_device_qd)
  1748. device_qd = min(instance->host->can_queue,
  1749. (int)tgt_device_qd);
  1750. }
  1751. if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
  1752. device_qd = instance->host->can_queue;
  1753. scsi_change_queue_depth(sdev, device_qd);
  1754. }
  1755. /*
  1756. * megasas_set_static_target_properties -
1757. * Device properties set by the driver are static and do not need to be
1758. * updated after OCR.
  1759. *
  1760. * set io timeout
  1761. * set device queue depth
  1762. * set nvme device properties. see - megasas_set_nvme_device_properties
  1763. *
  1764. * @sdev: scsi device
1765. * @is_target_prop: true if FW provided target properties.
  1766. */
  1767. static void megasas_set_static_target_properties(struct scsi_device *sdev,
  1768. bool is_target_prop)
  1769. {
  1770. u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
  1771. struct megasas_instance *instance;
  1772. instance = megasas_lookup_instance(sdev->host->host_no);
  1773. /*
  1774. * The RAID firmware may require extended timeouts.
  1775. */
  1776. blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1777. /* max_io_size_kb will be set to non-zero for
1778. * NVMe-based VDs and sysPDs.
  1779. */
  1780. if (is_target_prop)
  1781. max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
  1782. if (instance->nvme_page_size && max_io_size_kb)
  1783. megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
  1784. megasas_set_fw_assisted_qd(sdev, is_target_prop);
  1785. }
  1786. static int megasas_slave_configure(struct scsi_device *sdev)
  1787. {
  1788. u16 pd_index = 0;
  1789. struct megasas_instance *instance;
  1790. int ret_target_prop = DCMD_FAILED;
  1791. bool is_target_prop = false;
  1792. instance = megasas_lookup_instance(sdev->host->host_no);
  1793. if (instance->pd_list_not_supported) {
  1794. if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
  1795. pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
  1796. sdev->id;
  1797. if (instance->pd_list[pd_index].driveState !=
  1798. MR_PD_STATE_SYSTEM)
  1799. return -ENXIO;
  1800. }
  1801. }
  1802. mutex_lock(&instance->reset_mutex);
  1803. /* Send DCMD to Firmware and cache the information */
  1804. if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
  1805. megasas_get_pd_info(instance, sdev);
1806. /* Some Ventura firmware may not have instance->nvme_page_size set.
1807. * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
  1808. */
  1809. if ((instance->tgt_prop) && (instance->nvme_page_size))
  1810. ret_target_prop = megasas_get_target_prop(instance, sdev);
  1811. is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
  1812. megasas_set_static_target_properties(sdev, is_target_prop);
  1813. /* This sdev property may change post OCR */
  1814. megasas_set_dynamic_target_properties(sdev, is_target_prop);
  1815. mutex_unlock(&instance->reset_mutex);
  1816. return 0;
  1817. }
  1818. static int megasas_slave_alloc(struct scsi_device *sdev)
  1819. {
  1820. u16 pd_index = 0, ld_tgt_id;
1821. struct megasas_instance *instance;
  1822. struct MR_PRIV_DEVICE *mr_device_priv_data;
  1823. instance = megasas_lookup_instance(sdev->host->host_no);
  1824. if (!MEGASAS_IS_LOGICAL(sdev)) {
  1825. /*
  1826. * Open the OS scan to the SYSTEM PD
  1827. */
  1828. pd_index =
  1829. (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
  1830. sdev->id;
  1831. if ((instance->pd_list_not_supported ||
  1832. instance->pd_list[pd_index].driveState ==
  1833. MR_PD_STATE_SYSTEM)) {
  1834. goto scan_target;
  1835. }
  1836. return -ENXIO;
  1837. } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
  1838. sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
  1839. return -ENXIO;
  1840. }
  1841. scan_target:
  1842. mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
  1843. GFP_KERNEL);
  1844. if (!mr_device_priv_data)
  1845. return -ENOMEM;
  1846. if (MEGASAS_IS_LOGICAL(sdev)) {
  1847. ld_tgt_id = MEGASAS_TARGET_ID(sdev);
  1848. instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
  1849. if (megasas_dbg_lvl & LD_PD_DEBUG)
  1850. sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
  1851. }
  1852. sdev->hostdata = mr_device_priv_data;
  1853. atomic_set(&mr_device_priv_data->r1_ldio_hint,
  1854. instance->r1_ldio_hint_default);
  1855. return 0;
  1856. }
  1857. static void megasas_slave_destroy(struct scsi_device *sdev)
  1858. {
  1859. u16 ld_tgt_id;
  1860. struct megasas_instance *instance;
  1861. instance = megasas_lookup_instance(sdev->host->host_no);
  1862. if (MEGASAS_IS_LOGICAL(sdev)) {
  1863. if (!MEGASAS_IS_LUN_VALID(sdev)) {
  1864. sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
  1865. return;
  1866. }
  1867. ld_tgt_id = MEGASAS_TARGET_ID(sdev);
  1868. instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
  1869. if (megasas_dbg_lvl & LD_PD_DEBUG)
  1870. sdev_printk(KERN_INFO, sdev,
  1871. "LD target ID %d removed from OS stack\n", ld_tgt_id);
  1872. }
  1873. kfree(sdev->hostdata);
  1874. sdev->hostdata = NULL;
  1875. }
  1876. /*
1877. * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after the
1878. * adapter has been killed
  1879. * @instance: Adapter soft state
  1880. *
  1881. */
  1882. static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
  1883. {
  1884. int i;
  1885. struct megasas_cmd *cmd_mfi;
  1886. struct megasas_cmd_fusion *cmd_fusion;
  1887. struct fusion_context *fusion = instance->ctrl_context;
  1888. /* Find all outstanding ioctls */
  1889. if (fusion) {
  1890. for (i = 0; i < instance->max_fw_cmds; i++) {
  1891. cmd_fusion = fusion->cmd_list[i];
  1892. if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
  1893. cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
  1894. if (cmd_mfi->sync_cmd &&
  1895. (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
  1896. cmd_mfi->frame->hdr.cmd_status =
  1897. MFI_STAT_WRONG_STATE;
  1898. megasas_complete_cmd(instance,
  1899. cmd_mfi, DID_OK);
  1900. }
  1901. }
  1902. }
  1903. } else {
  1904. for (i = 0; i < instance->max_fw_cmds; i++) {
  1905. cmd_mfi = instance->cmd_list[i];
  1906. if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
  1907. MFI_CMD_ABORT)
  1908. megasas_complete_cmd(instance, cmd_mfi, DID_OK);
  1909. }
  1910. }
  1911. }
  1912. void megaraid_sas_kill_hba(struct megasas_instance *instance)
  1913. {
  1914. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  1915. dev_warn(&instance->pdev->dev,
  1916. "Adapter already dead, skipping kill HBA\n");
  1917. return;
  1918. }
  1919. /* Set critical error to block I/O & ioctls in case caller didn't */
  1920. atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
  1921. /* Wait 1 second to ensure IO or ioctls in build have posted */
  1922. msleep(1000);
  1923. if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
  1924. (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
  1925. (instance->adapter_type != MFI_SERIES)) {
  1926. if (!instance->requestorId) {
  1927. writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
  1928. /* Flush */
  1929. readl(&instance->reg_set->doorbell);
  1930. }
  1931. if (instance->requestorId && instance->peerIsPresent)
  1932. memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
  1933. } else {
  1934. writel(MFI_STOP_ADP,
  1935. &instance->reg_set->inbound_doorbell);
  1936. }
  1937. /* Complete outstanding ioctls when adapter is killed */
  1938. megasas_complete_outstanding_ioctls(instance);
  1939. }
  1940. /**
  1941. * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
  1942. * restored to max value
  1943. * @instance: Adapter soft state
  1944. *
  1945. */
  1946. void
  1947. megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
  1948. {
  1949. unsigned long flags;
  1950. if (instance->flag & MEGASAS_FW_BUSY
  1951. && time_after(jiffies, instance->last_time + 5 * HZ)
  1952. && atomic_read(&instance->fw_outstanding) <
  1953. instance->throttlequeuedepth + 1) {
  1954. spin_lock_irqsave(instance->host->host_lock, flags);
  1955. instance->flag &= ~MEGASAS_FW_BUSY;
  1956. instance->host->can_queue = instance->cur_can_queue;
  1957. spin_unlock_irqrestore(instance->host->host_lock, flags);
  1958. }
  1959. }
  1960. /**
1961. * megasas_complete_cmd_dpc - Completes commands on the MFI reply queue
  1962. * @instance_addr: Address of adapter soft state
  1963. *
  1964. * Tasklet to complete cmds
  1965. */
  1966. static void megasas_complete_cmd_dpc(unsigned long instance_addr)
  1967. {
  1968. u32 producer;
  1969. u32 consumer;
  1970. u32 context;
  1971. struct megasas_cmd *cmd;
  1972. struct megasas_instance *instance =
  1973. (struct megasas_instance *)instance_addr;
  1974. unsigned long flags;
1975. /* If we have already declared the adapter dead, do not complete cmds */
  1976. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
  1977. return;
  1978. spin_lock_irqsave(&instance->completion_lock, flags);
  1979. producer = le32_to_cpu(*instance->producer);
  1980. consumer = le32_to_cpu(*instance->consumer);
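/*
 * Editor's note: the reply queue is a ring of max_fw_cmds + 1 entries, each
 * holding the context (command index) of a completed command.  The loop below
 * walks from the driver's consumer index up to the firmware's producer index,
 * completing each command, then publishes the producer value as the new
 * consumer index.
 */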
  1981. while (consumer != producer) {
  1982. context = le32_to_cpu(instance->reply_queue[consumer]);
  1983. if (context >= instance->max_fw_cmds) {
  1984. dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
  1985. context);
  1986. BUG();
  1987. }
  1988. cmd = instance->cmd_list[context];
  1989. megasas_complete_cmd(instance, cmd, DID_OK);
  1990. consumer++;
  1991. if (consumer == (instance->max_fw_cmds + 1)) {
  1992. consumer = 0;
  1993. }
  1994. }
  1995. *instance->consumer = cpu_to_le32(producer);
  1996. spin_unlock_irqrestore(&instance->completion_lock, flags);
  1997. /*
  1998. * Check if we can restore can_queue
  1999. */
  2000. megasas_check_and_restore_queue_depth(instance);
  2001. }
  2002. static void megasas_sriov_heartbeat_handler(struct timer_list *t);
  2003. /**
  2004. * megasas_start_timer - Initializes sriov heartbeat timer object
  2005. * @instance: Adapter soft state
  2006. *
  2007. */
  2008. void megasas_start_timer(struct megasas_instance *instance)
  2009. {
  2010. struct timer_list *timer = &instance->sriov_heartbeat_timer;
  2011. timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
  2012. timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
  2013. add_timer(timer);
  2014. }
  2015. static void
  2016. megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
  2017. static void
  2018. process_fw_state_change_wq(struct work_struct *work);
  2019. static void megasas_do_ocr(struct megasas_instance *instance)
  2020. {
  2021. if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
  2022. (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
  2023. (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
  2024. *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
  2025. }
  2026. instance->instancet->disable_intr(instance);
  2027. atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
  2028. instance->issuepend_done = 0;
  2029. atomic_set(&instance->fw_outstanding, 0);
  2030. megasas_internal_reset_defer_cmds(instance);
  2031. process_fw_state_change_wq(&instance->work_init);
  2032. }
  2033. static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
  2034. int initial)
  2035. {
  2036. struct megasas_cmd *cmd;
  2037. struct megasas_dcmd_frame *dcmd;
  2038. struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
  2039. dma_addr_t new_affiliation_111_h;
  2040. int ld, retval = 0;
  2041. u8 thisVf;
  2042. cmd = megasas_get_cmd(instance);
  2043. if (!cmd) {
  2044. dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
  2045. "Failed to get cmd for scsi%d\n",
  2046. instance->host->host_no);
  2047. return -ENOMEM;
  2048. }
  2049. dcmd = &cmd->frame->dcmd;
  2050. if (!instance->vf_affiliation_111) {
  2051. dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
  2052. "affiliation for scsi%d\n", instance->host->host_no);
  2053. megasas_return_cmd(instance, cmd);
  2054. return -ENOMEM;
  2055. }
  2056. if (initial)
  2057. memset(instance->vf_affiliation_111, 0,
  2058. sizeof(struct MR_LD_VF_AFFILIATION_111));
  2059. else {
  2060. new_affiliation_111 =
  2061. dma_alloc_coherent(&instance->pdev->dev,
  2062. sizeof(struct MR_LD_VF_AFFILIATION_111),
  2063. &new_affiliation_111_h, GFP_KERNEL);
  2064. if (!new_affiliation_111) {
  2065. dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
  2066. "memory for new affiliation for scsi%d\n",
  2067. instance->host->host_no);
  2068. megasas_return_cmd(instance, cmd);
  2069. return -ENOMEM;
  2070. }
  2071. }
  2072. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  2073. dcmd->cmd = MFI_CMD_DCMD;
  2074. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  2075. dcmd->sge_count = 1;
  2076. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
  2077. dcmd->timeout = 0;
  2078. dcmd->pad_0 = 0;
  2079. dcmd->data_xfer_len =
  2080. cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
  2081. dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
  2082. if (initial)
  2083. dcmd->sgl.sge32[0].phys_addr =
  2084. cpu_to_le32(instance->vf_affiliation_111_h);
  2085. else
  2086. dcmd->sgl.sge32[0].phys_addr =
  2087. cpu_to_le32(new_affiliation_111_h);
  2088. dcmd->sgl.sge32[0].length = cpu_to_le32(
  2089. sizeof(struct MR_LD_VF_AFFILIATION_111));
  2090. dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
  2091. "scsi%d\n", instance->host->host_no);
  2092. if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
  2093. dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
  2094. " failed with status 0x%x for scsi%d\n",
  2095. dcmd->cmd_status, instance->host->host_no);
  2096. retval = 1; /* Do a scan if we couldn't get affiliation */
  2097. goto out;
  2098. }
  2099. if (!initial) {
  2100. thisVf = new_affiliation_111->thisVf;
  2101. for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
  2102. if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
  2103. new_affiliation_111->map[ld].policy[thisVf]) {
  2104. dev_warn(&instance->pdev->dev, "SR-IOV: "
  2105. "Got new LD/VF affiliation for scsi%d\n",
  2106. instance->host->host_no);
  2107. memcpy(instance->vf_affiliation_111,
  2108. new_affiliation_111,
  2109. sizeof(struct MR_LD_VF_AFFILIATION_111));
  2110. retval = 1;
  2111. goto out;
  2112. }
  2113. }
  2114. out:
  2115. if (new_affiliation_111) {
  2116. dma_free_coherent(&instance->pdev->dev,
  2117. sizeof(struct MR_LD_VF_AFFILIATION_111),
  2118. new_affiliation_111,
  2119. new_affiliation_111_h);
  2120. }
  2121. megasas_return_cmd(instance, cmd);
  2122. return retval;
  2123. }
  2124. static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
  2125. int initial)
  2126. {
  2127. struct megasas_cmd *cmd;
  2128. struct megasas_dcmd_frame *dcmd;
  2129. struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
  2130. struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
  2131. dma_addr_t new_affiliation_h;
  2132. int i, j, retval = 0, found = 0, doscan = 0;
  2133. u8 thisVf;
  2134. cmd = megasas_get_cmd(instance);
  2135. if (!cmd) {
  2136. dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
  2137. "Failed to get cmd for scsi%d\n",
  2138. instance->host->host_no);
  2139. return -ENOMEM;
  2140. }
  2141. dcmd = &cmd->frame->dcmd;
  2142. if (!instance->vf_affiliation) {
  2143. dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
  2144. "affiliation for scsi%d\n", instance->host->host_no);
  2145. megasas_return_cmd(instance, cmd);
  2146. return -ENOMEM;
  2147. }
  2148. if (initial)
  2149. memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
  2150. sizeof(struct MR_LD_VF_AFFILIATION));
  2151. else {
  2152. new_affiliation =
  2153. dma_alloc_coherent(&instance->pdev->dev,
  2154. (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
  2155. &new_affiliation_h, GFP_KERNEL);
  2156. if (!new_affiliation) {
  2157. dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
  2158. "memory for new affiliation for scsi%d\n",
  2159. instance->host->host_no);
  2160. megasas_return_cmd(instance, cmd);
  2161. return -ENOMEM;
  2162. }
  2163. }
  2164. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  2165. dcmd->cmd = MFI_CMD_DCMD;
  2166. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  2167. dcmd->sge_count = 1;
  2168. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
  2169. dcmd->timeout = 0;
  2170. dcmd->pad_0 = 0;
  2171. dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
  2172. sizeof(struct MR_LD_VF_AFFILIATION));
  2173. dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
  2174. if (initial)
  2175. dcmd->sgl.sge32[0].phys_addr =
  2176. cpu_to_le32(instance->vf_affiliation_h);
  2177. else
  2178. dcmd->sgl.sge32[0].phys_addr =
  2179. cpu_to_le32(new_affiliation_h);
  2180. dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
  2181. sizeof(struct MR_LD_VF_AFFILIATION));
  2182. dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
  2183. "scsi%d\n", instance->host->host_no);
  2184. if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
  2185. dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
  2186. " failed with status 0x%x for scsi%d\n",
  2187. dcmd->cmd_status, instance->host->host_no);
  2188. retval = 1; /* Do a scan if we couldn't get affiliation */
  2189. goto out;
  2190. }
  2191. if (!initial) {
  2192. if (!new_affiliation->ldCount) {
  2193. dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
  2194. "affiliation for passive path for scsi%d\n",
  2195. instance->host->host_no);
  2196. retval = 1;
  2197. goto out;
  2198. }
  2199. newmap = new_affiliation->map;
  2200. savedmap = instance->vf_affiliation->map;
  2201. thisVf = new_affiliation->thisVf;
  2202. for (i = 0 ; i < new_affiliation->ldCount; i++) {
  2203. found = 0;
  2204. for (j = 0; j < instance->vf_affiliation->ldCount;
  2205. j++) {
  2206. if (newmap->ref.targetId ==
  2207. savedmap->ref.targetId) {
  2208. found = 1;
  2209. if (newmap->policy[thisVf] !=
  2210. savedmap->policy[thisVf]) {
  2211. doscan = 1;
  2212. goto out;
  2213. }
  2214. }
  2215. savedmap = (struct MR_LD_VF_MAP *)
  2216. ((unsigned char *)savedmap +
  2217. savedmap->size);
  2218. }
  2219. if (!found && newmap->policy[thisVf] !=
  2220. MR_LD_ACCESS_HIDDEN) {
  2221. doscan = 1;
  2222. goto out;
  2223. }
  2224. newmap = (struct MR_LD_VF_MAP *)
  2225. ((unsigned char *)newmap + newmap->size);
  2226. }
  2227. newmap = new_affiliation->map;
  2228. savedmap = instance->vf_affiliation->map;
  2229. for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
  2230. found = 0;
  2231. for (j = 0 ; j < new_affiliation->ldCount; j++) {
  2232. if (savedmap->ref.targetId ==
  2233. newmap->ref.targetId) {
  2234. found = 1;
  2235. if (savedmap->policy[thisVf] !=
  2236. newmap->policy[thisVf]) {
  2237. doscan = 1;
  2238. goto out;
  2239. }
  2240. }
  2241. newmap = (struct MR_LD_VF_MAP *)
  2242. ((unsigned char *)newmap +
  2243. newmap->size);
  2244. }
  2245. if (!found && savedmap->policy[thisVf] !=
  2246. MR_LD_ACCESS_HIDDEN) {
  2247. doscan = 1;
  2248. goto out;
  2249. }
  2250. savedmap = (struct MR_LD_VF_MAP *)
  2251. ((unsigned char *)savedmap +
  2252. savedmap->size);
  2253. }
  2254. }
  2255. out:
  2256. if (doscan) {
  2257. dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
  2258. "affiliation for scsi%d\n", instance->host->host_no);
  2259. memcpy(instance->vf_affiliation, new_affiliation,
  2260. new_affiliation->size);
  2261. retval = 1;
  2262. }
  2263. if (new_affiliation)
  2264. dma_free_coherent(&instance->pdev->dev,
  2265. (MAX_LOGICAL_DRIVES + 1) *
  2266. sizeof(struct MR_LD_VF_AFFILIATION),
  2267. new_affiliation, new_affiliation_h);
  2268. megasas_return_cmd(instance, cmd);
  2269. return retval;
  2270. }
  2271. /* This function will get the current SR-IOV LD/VF affiliation */
  2272. static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
  2273. int initial)
  2274. {
  2275. int retval;
  2276. if (instance->PlasmaFW111)
  2277. retval = megasas_get_ld_vf_affiliation_111(instance, initial);
  2278. else
  2279. retval = megasas_get_ld_vf_affiliation_12(instance, initial);
  2280. return retval;
  2281. }
  2282. /* This function will tell FW to start the SR-IOV heartbeat */
  2283. int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
  2284. int initial)
  2285. {
  2286. struct megasas_cmd *cmd;
  2287. struct megasas_dcmd_frame *dcmd;
  2288. int retval = 0;
  2289. cmd = megasas_get_cmd(instance);
  2290. if (!cmd) {
  2291. dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
  2292. "Failed to get cmd for scsi%d\n",
  2293. instance->host->host_no);
  2294. return -ENOMEM;
  2295. }
  2296. dcmd = &cmd->frame->dcmd;
  2297. if (initial) {
  2298. instance->hb_host_mem =
  2299. dma_alloc_coherent(&instance->pdev->dev,
  2300. sizeof(struct MR_CTRL_HB_HOST_MEM),
  2301. &instance->hb_host_mem_h,
  2302. GFP_KERNEL);
  2303. if (!instance->hb_host_mem) {
  2304. dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
  2305. " memory for heartbeat host memory for scsi%d\n",
  2306. instance->host->host_no);
  2307. retval = -ENOMEM;
  2308. goto out;
  2309. }
  2310. }
  2311. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  2312. dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
  2313. dcmd->cmd = MFI_CMD_DCMD;
  2314. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  2315. dcmd->sge_count = 1;
  2316. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
  2317. dcmd->timeout = 0;
  2318. dcmd->pad_0 = 0;
  2319. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
  2320. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
  2321. megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
  2322. sizeof(struct MR_CTRL_HB_HOST_MEM));
  2323. dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
  2324. instance->host->host_no);
  2325. if ((instance->adapter_type != MFI_SERIES) &&
  2326. !instance->mask_interrupts)
  2327. retval = megasas_issue_blocked_cmd(instance, cmd,
  2328. MEGASAS_ROUTINE_WAIT_TIME_VF);
  2329. else
  2330. retval = megasas_issue_polled(instance, cmd);
  2331. if (retval) {
  2332. dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
  2333. "_MEM_ALLOC DCMD %s for scsi%d\n",
  2334. (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
  2335. "timed out" : "failed", instance->host->host_no);
  2336. retval = 1;
  2337. }
  2338. out:
  2339. megasas_return_cmd(instance, cmd);
  2340. return retval;
  2341. }
  2342. /* Handler for SR-IOV heartbeat */
  2343. static void megasas_sriov_heartbeat_handler(struct timer_list *t)
  2344. {
  2345. struct megasas_instance *instance =
  2346. from_timer(instance, t, sriov_heartbeat_timer);
  2347. if (instance->hb_host_mem->HB.fwCounter !=
  2348. instance->hb_host_mem->HB.driverCounter) {
  2349. instance->hb_host_mem->HB.driverCounter =
  2350. instance->hb_host_mem->HB.fwCounter;
  2351. mod_timer(&instance->sriov_heartbeat_timer,
  2352. jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
  2353. } else {
  2354. dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
  2355. "completed for scsi%d\n", instance->host->host_no);
  2356. schedule_work(&instance->work_init);
  2357. }
  2358. }
  2359. /**
  2360. * megasas_wait_for_outstanding - Wait for all outstanding cmds
  2361. * @instance: Adapter soft state
  2362. *
  2363. * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
  2364. * complete all its outstanding commands. Returns error if one or more IOs
  2365. * are pending after this time period. It also marks the controller dead.
  2366. */
  2367. static int megasas_wait_for_outstanding(struct megasas_instance *instance)
  2368. {
  2369. int i, sl, outstanding;
  2370. u32 reset_index;
  2371. u32 wait_time = MEGASAS_RESET_WAIT_TIME;
  2372. unsigned long flags;
  2373. struct list_head clist_local;
  2374. struct megasas_cmd *reset_cmd;
  2375. u32 fw_state;
  2376. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  2377. dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
  2378. __func__, __LINE__);
  2379. return FAILED;
  2380. }
  2381. if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
  2382. INIT_LIST_HEAD(&clist_local);
  2383. spin_lock_irqsave(&instance->hba_lock, flags);
  2384. list_splice_init(&instance->internal_reset_pending_q,
  2385. &clist_local);
  2386. spin_unlock_irqrestore(&instance->hba_lock, flags);
  2387. dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
  2388. for (i = 0; i < wait_time; i++) {
  2389. msleep(1000);
  2390. if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
  2391. break;
  2392. }
  2393. if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
  2394. dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
  2395. atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
  2396. return FAILED;
  2397. }
  2398. reset_index = 0;
  2399. while (!list_empty(&clist_local)) {
  2400. reset_cmd = list_entry((&clist_local)->next,
  2401. struct megasas_cmd, list);
  2402. list_del_init(&reset_cmd->list);
  2403. if (reset_cmd->scmd) {
  2404. reset_cmd->scmd->result = DID_REQUEUE << 16;
  2405. dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
  2406. reset_index, reset_cmd,
  2407. reset_cmd->scmd->cmnd[0]);
  2408. scsi_done(reset_cmd->scmd);
  2409. megasas_return_cmd(instance, reset_cmd);
  2410. } else if (reset_cmd->sync_cmd) {
  2411. dev_notice(&instance->pdev->dev, "%p synch cmds"
  2412. "reset queue\n",
  2413. reset_cmd);
  2414. reset_cmd->cmd_status_drv = DCMD_INIT;
  2415. instance->instancet->fire_cmd(instance,
  2416. reset_cmd->frame_phys_addr,
  2417. 0, instance->reg_set);
  2418. } else {
  2419. dev_notice(&instance->pdev->dev, "%p unexpected"
  2420. "cmds lst\n",
  2421. reset_cmd);
  2422. }
  2423. reset_index++;
  2424. }
  2425. return SUCCESS;
  2426. }
  2427. for (i = 0; i < resetwaittime; i++) {
  2428. outstanding = atomic_read(&instance->fw_outstanding);
  2429. if (!outstanding)
  2430. break;
  2431. if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
  2432. dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
  2433. "commands to complete\n",i,outstanding);
  2434. /*
  2435. * Call cmd completion routine. Cmd to be
  2436. * be completed directly without depending on isr.
  2437. */
  2438. megasas_complete_cmd_dpc((unsigned long)instance);
  2439. }
  2440. msleep(1000);
  2441. }
  2442. i = 0;
  2443. outstanding = atomic_read(&instance->fw_outstanding);
  2444. fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
  2445. if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
  2446. goto no_outstanding;
  2447. if (instance->disableOnlineCtrlReset)
  2448. goto kill_hba_and_failed;
  2449. do {
  2450. if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
  2451. dev_info(&instance->pdev->dev,
  2452. "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
  2453. __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
  2454. if (i == 3)
  2455. goto kill_hba_and_failed;
  2456. megasas_do_ocr(instance);
  2457. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  2458. dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
  2459. __func__, __LINE__);
  2460. return FAILED;
  2461. }
  2462. dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
  2463. __func__, __LINE__);
  2464. for (sl = 0; sl < 10; sl++)
  2465. msleep(500);
  2466. outstanding = atomic_read(&instance->fw_outstanding);
  2467. fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
  2468. if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
  2469. goto no_outstanding;
  2470. }
  2471. i++;
  2472. } while (i <= 3);
  2473. no_outstanding:
  2474. dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
  2475. __func__, __LINE__);
  2476. return SUCCESS;
  2477. kill_hba_and_failed:
  2478. /* Reset not supported, kill adapter */
  2479. dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
  2480. " disableOnlineCtrlReset %d fw_outstanding %d \n",
  2481. __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
  2482. atomic_read(&instance->fw_outstanding));
  2483. megasas_dump_pending_frames(instance);
  2484. megaraid_sas_kill_hba(instance);
  2485. return FAILED;
  2486. }
  2487. /**
  2488. * megasas_generic_reset - Generic reset routine
  2489. * @scmd: Mid-layer SCSI command
  2490. *
  2491. * This routine implements a generic reset handler for device, bus and host
  2492. * reset requests. Device, bus and host specific reset handlers can use this
  2493. * function after they do their specific tasks.
  2494. */
  2495. static int megasas_generic_reset(struct scsi_cmnd *scmd)
  2496. {
  2497. int ret_val;
  2498. struct megasas_instance *instance;
  2499. instance = (struct megasas_instance *)scmd->device->host->hostdata;
  2500. scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
  2501. scmd->cmnd[0], scmd->retries);
  2502. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  2503. dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
  2504. return FAILED;
  2505. }
  2506. ret_val = megasas_wait_for_outstanding(instance);
  2507. if (ret_val == SUCCESS)
  2508. dev_notice(&instance->pdev->dev, "reset successful\n");
  2509. else
  2510. dev_err(&instance->pdev->dev, "failed to do reset\n");
  2511. return ret_val;
  2512. }
  2513. /**
  2514. * megasas_reset_timer - quiesce the adapter if required
  2515. * @scmd: scsi cmnd
  2516. *
  2517. * Sets the FW busy flag and reduces the host->can_queue if the
  2518. * cmd has not been completed within the timeout period.
  2519. */
  2520. static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd)
  2521. {
  2522. struct megasas_instance *instance;
  2523. unsigned long flags;
  2524. if (time_after(jiffies, scmd->jiffies_at_alloc +
  2525. (scmd_timeout * 2) * HZ)) {
  2526. return SCSI_EH_NOT_HANDLED;
  2527. }
  2528. instance = (struct megasas_instance *)scmd->device->host->hostdata;
  2529. if (!(instance->flag & MEGASAS_FW_BUSY)) {
  2530. /* FW is busy, throttle IO */
  2531. spin_lock_irqsave(instance->host->host_lock, flags);
  2532. instance->host->can_queue = instance->throttlequeuedepth;
  2533. instance->last_time = jiffies;
  2534. instance->flag |= MEGASAS_FW_BUSY;
  2535. spin_unlock_irqrestore(instance->host->host_lock, flags);
  2536. }
  2537. return SCSI_EH_RESET_TIMER;
  2538. }
  2539. /**
  2540. * megasas_dump - This function will print hexdump of provided buffer.
  2541. * @buf: Buffer to be dumped
  2542. * @sz: Size in bytes
  2543. * @format: Different formats of dumping e.g. format=n will
  2544. * cause only 'n' 32 bit words to be dumped in a single
  2545. * line.
  2546. */
  2547. inline void
  2548. megasas_dump(void *buf, int sz, int format)
  2549. {
  2550. int i;
  2551. __le32 *buf_loc = (__le32 *)buf;
  2552. for (i = 0; i < (sz / sizeof(__le32)); i++) {
  2553. if ((i % format) == 0) {
  2554. if (i != 0)
  2555. printk(KERN_CONT "\n");
  2556. printk(KERN_CONT "%08x: ", (i * 4));
  2557. }
  2558. printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
  2559. }
  2560. printk(KERN_CONT "\n");
  2561. }
  2562. /**
  2563. * megasas_dump_reg_set - This function will print hexdump of register set
  2564. * @reg_set: Register set to be dumped
  2565. */
  2566. inline void
  2567. megasas_dump_reg_set(void __iomem *reg_set)
  2568. {
  2569. unsigned int i, sz = 256;
  2570. u32 __iomem *reg = (u32 __iomem *)reg_set;
  2571. for (i = 0; i < (sz / sizeof(u32)); i++)
  2572. printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
  2573. }
  2574. /**
  2575. * megasas_dump_fusion_io - This function will print key details
  2576. * of SCSI IO
  2577. * @scmd: SCSI command pointer of SCSI IO
  2578. */
  2579. void
  2580. megasas_dump_fusion_io(struct scsi_cmnd *scmd)
  2581. {
  2582. struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv;
  2583. union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
  2584. struct megasas_instance *instance;
  2585. instance = (struct megasas_instance *)scmd->device->host->hostdata;
  2586. scmd_printk(KERN_INFO, scmd,
  2587. "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
  2588. scmd, scmd->retries, scmd->allowed);
  2589. scsi_print_command(scmd);
  2590. if (cmd) {
  2591. req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
  2592. scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
  2593. scmd_printk(KERN_INFO, scmd,
  2594. "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
  2595. req_desc->SCSIIO.RequestFlags,
  2596. req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
  2597. req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
  2598. printk(KERN_INFO "IO request frame:\n");
  2599. megasas_dump(cmd->io_request,
  2600. MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
  2601. printk(KERN_INFO "Chain frame:\n");
  2602. megasas_dump(cmd->sg_frame,
  2603. instance->max_chain_frame_sz, 8);
  2604. }
  2605. }
  2606. /*
  2607. * megasas_dump_sys_regs - This function will dump system registers through
  2608. * sysfs.
  2609. * @reg_set: Pointer to System register set.
  2610. * @buf: Buffer to which output is to be written.
  2611. * @return: Number of bytes written to buffer.
  2612. */
  2613. static inline ssize_t
  2614. megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
  2615. {
  2616. unsigned int i, sz = 256;
  2617. int bytes_wrote = 0;
  2618. char *loc = (char *)buf;
  2619. u32 __iomem *reg = (u32 __iomem *)reg_set;
  2620. for (i = 0; i < sz / sizeof(u32); i++) {
  2621. bytes_wrote += scnprintf(loc + bytes_wrote,
  2622. PAGE_SIZE - bytes_wrote,
  2623. "%08x: %08x\n", (i * 4),
  2624. readl(&reg[i]));
  2625. }
  2626. return bytes_wrote;
  2627. }
  2628. /**
  2629. * megasas_reset_bus_host - Bus & host reset handler entry point
  2630. * @scmd: Mid-layer SCSI command
  2631. */
  2632. static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
  2633. {
  2634. int ret;
  2635. struct megasas_instance *instance;
  2636. instance = (struct megasas_instance *)scmd->device->host->hostdata;
  2637. scmd_printk(KERN_INFO, scmd,
  2638. "OCR is requested due to IO timeout!!\n");
  2639. scmd_printk(KERN_INFO, scmd,
  2640. "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
  2641. scmd->device->host->shost_state,
  2642. scsi_host_busy(scmd->device->host),
  2643. atomic_read(&instance->fw_outstanding));
  2644. /*
  2645. * First wait for all commands to complete
  2646. */
  2647. if (instance->adapter_type == MFI_SERIES) {
  2648. ret = megasas_generic_reset(scmd);
  2649. } else {
  2650. megasas_dump_fusion_io(scmd);
  2651. ret = megasas_reset_fusion(scmd->device->host,
  2652. SCSIIO_TIMEOUT_OCR);
  2653. }
  2654. return ret;
  2655. }
  2656. /**
  2657. * megasas_task_abort - Issues task abort request to firmware
  2658. * (supported only for fusion adapters)
  2659. * @scmd: SCSI command pointer
  2660. */
  2661. static int megasas_task_abort(struct scsi_cmnd *scmd)
  2662. {
  2663. int ret;
  2664. struct megasas_instance *instance;
  2665. instance = (struct megasas_instance *)scmd->device->host->hostdata;
  2666. if (instance->adapter_type != MFI_SERIES)
  2667. ret = megasas_task_abort_fusion(scmd);
  2668. else {
  2669. sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
  2670. ret = FAILED;
  2671. }
  2672. return ret;
  2673. }
  2674. /**
  2675. * megasas_reset_target: Issues target reset request to firmware
  2676. * (supported only for fusion adapters)
  2677. * @scmd: SCSI command pointer
  2678. */
  2679. static int megasas_reset_target(struct scsi_cmnd *scmd)
  2680. {
  2681. int ret;
  2682. struct megasas_instance *instance;
  2683. instance = (struct megasas_instance *)scmd->device->host->hostdata;
  2684. if (instance->adapter_type != MFI_SERIES)
  2685. ret = megasas_reset_target_fusion(scmd);
  2686. else {
  2687. sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
  2688. ret = FAILED;
  2689. }
  2690. return ret;
  2691. }
  2692. /**
  2693. * megasas_bios_param - Returns disk geometry for a disk
  2694. * @sdev: device handle
  2695. * @bdev: block device
  2696. * @capacity: drive capacity
  2697. * @geom: geometry parameters
  2698. */
  2699. static int
  2700. megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
  2701. sector_t capacity, int geom[])
  2702. {
  2703. int heads;
  2704. int sectors;
  2705. sector_t cylinders;
  2706. unsigned long tmp;
  2707. /* Default heads (64) & sectors (32) */
  2708. heads = 64;
  2709. sectors = 32;
  2710. tmp = heads * sectors;
  2711. cylinders = capacity;
  2712. sector_div(cylinders, tmp);
  2713. /*
  2714. * Handle extended translation size for logical drives > 1Gb
  2715. */
  2716. if (capacity >= 0x200000) {
  2717. heads = 255;
  2718. sectors = 63;
  2719. tmp = heads*sectors;
  2720. cylinders = capacity;
  2721. sector_div(cylinders, tmp);
  2722. }
  2723. geom[0] = heads;
  2724. geom[1] = sectors;
  2725. geom[2] = cylinders;
  2726. return 0;
  2727. }
  2728. static void megasas_map_queues(struct Scsi_Host *shost)
  2729. {
  2730. struct megasas_instance *instance;
  2731. int qoff = 0, offset;
  2732. struct blk_mq_queue_map *map;
  2733. instance = (struct megasas_instance *)shost->hostdata;
  2734. if (shost->nr_hw_queues == 1)
  2735. return;
  2736. offset = instance->low_latency_index_start;
  2737. /* Setup Default hctx */
  2738. map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
  2739. map->nr_queues = instance->msix_vectors - offset;
  2740. map->queue_offset = 0;
  2741. blk_mq_pci_map_queues(map, instance->pdev, offset);
  2742. qoff += map->nr_queues;
  2743. offset += map->nr_queues;
  2744. /* we never use READ queue, so can't cheat blk-mq */
  2745. shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
  2746. /* Setup Poll hctx */
  2747. map = &shost->tag_set.map[HCTX_TYPE_POLL];
  2748. map->nr_queues = instance->iopoll_q_count;
  2749. if (map->nr_queues) {
  2750. /*
  2751. * The poll queue(s) doesn't have an IRQ (and hence IRQ
  2752. * affinity), so use the regular blk-mq cpu mapping
  2753. */
  2754. map->queue_offset = qoff;
  2755. blk_mq_map_queues(map);
  2756. }
  2757. }
  2758. static void megasas_aen_polling(struct work_struct *work);
  2759. /**
  2760. * megasas_service_aen - Processes an event notification
  2761. * @instance: Adapter soft state
  2762. * @cmd: AEN command completed by the ISR
  2763. *
  2764. * For AEN, driver sends a command down to FW that is held by the FW till an
  2765. * event occurs. When an event of interest occurs, FW completes the command
  2766. * that it was previously holding.
  2767. *
  2768. * This routines sends SIGIO signal to processes that have registered with the
  2769. * driver for AEN.
  2770. */
  2771. static void
  2772. megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
  2773. {
  2774. unsigned long flags;
  2775. /*
  2776. * Don't signal app if it is just an aborted previously registered aen
  2777. */
  2778. if ((!cmd->abort_aen) && (instance->unload == 0)) {
  2779. spin_lock_irqsave(&poll_aen_lock, flags);
  2780. megasas_poll_wait_aen = 1;
  2781. spin_unlock_irqrestore(&poll_aen_lock, flags);
  2782. wake_up(&megasas_poll_wait);
  2783. kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
  2784. }
  2785. else
  2786. cmd->abort_aen = 0;
  2787. instance->aen_cmd = NULL;
  2788. megasas_return_cmd(instance, cmd);
  2789. if ((instance->unload == 0) &&
  2790. ((instance->issuepend_done == 1))) {
  2791. struct megasas_aen_event *ev;
  2792. ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
  2793. if (!ev) {
  2794. dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
  2795. } else {
  2796. ev->instance = instance;
  2797. instance->ev = ev;
  2798. INIT_DELAYED_WORK(&ev->hotplug_work,
  2799. megasas_aen_polling);
  2800. schedule_delayed_work(&ev->hotplug_work, 0);
  2801. }
  2802. }
  2803. }
  2804. static ssize_t
  2805. fw_crash_buffer_store(struct device *cdev,
  2806. struct device_attribute *attr, const char *buf, size_t count)
  2807. {
  2808. struct Scsi_Host *shost = class_to_shost(cdev);
  2809. struct megasas_instance *instance =
  2810. (struct megasas_instance *) shost->hostdata;
  2811. int val = 0;
  2812. if (kstrtoint(buf, 0, &val) != 0)
  2813. return -EINVAL;
  2814. mutex_lock(&instance->crashdump_lock);
  2815. instance->fw_crash_buffer_offset = val;
  2816. mutex_unlock(&instance->crashdump_lock);
  2817. return strlen(buf);
  2818. }
  2819. static ssize_t
  2820. fw_crash_buffer_show(struct device *cdev,
  2821. struct device_attribute *attr, char *buf)
  2822. {
  2823. struct Scsi_Host *shost = class_to_shost(cdev);
  2824. struct megasas_instance *instance =
  2825. (struct megasas_instance *) shost->hostdata;
  2826. u32 size;
  2827. unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
  2828. unsigned long chunk_left_bytes;
  2829. unsigned long src_addr;
  2830. u32 buff_offset;
  2831. mutex_lock(&instance->crashdump_lock);
  2832. buff_offset = instance->fw_crash_buffer_offset;
  2833. if (!instance->crash_dump_buf ||
  2834. !((instance->fw_crash_state == AVAILABLE) ||
  2835. (instance->fw_crash_state == COPYING))) {
  2836. dev_err(&instance->pdev->dev,
  2837. "Firmware crash dump is not available\n");
  2838. mutex_unlock(&instance->crashdump_lock);
  2839. return -EINVAL;
  2840. }
  2841. if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
  2842. dev_err(&instance->pdev->dev,
  2843. "Firmware crash dump offset is out of range\n");
  2844. mutex_unlock(&instance->crashdump_lock);
  2845. return 0;
  2846. }
  2847. size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
  2848. chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
  2849. size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
  2850. size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
  2851. src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
  2852. (buff_offset % dmachunk);
  2853. memcpy(buf, (void *)src_addr, size);
  2854. mutex_unlock(&instance->crashdump_lock);
  2855. return size;
  2856. }
  2857. static ssize_t
  2858. fw_crash_buffer_size_show(struct device *cdev,
  2859. struct device_attribute *attr, char *buf)
  2860. {
  2861. struct Scsi_Host *shost = class_to_shost(cdev);
  2862. struct megasas_instance *instance =
  2863. (struct megasas_instance *) shost->hostdata;
  2864. return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
  2865. ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
  2866. }
  2867. static ssize_t
  2868. fw_crash_state_store(struct device *cdev,
  2869. struct device_attribute *attr, const char *buf, size_t count)
  2870. {
  2871. struct Scsi_Host *shost = class_to_shost(cdev);
  2872. struct megasas_instance *instance =
  2873. (struct megasas_instance *) shost->hostdata;
  2874. int val = 0;
  2875. if (kstrtoint(buf, 0, &val) != 0)
  2876. return -EINVAL;
  2877. if ((val <= AVAILABLE || val > COPY_ERROR)) {
  2878. dev_err(&instance->pdev->dev, "application updates invalid "
  2879. "firmware crash state\n");
  2880. return -EINVAL;
  2881. }
  2882. instance->fw_crash_state = val;
  2883. if ((val == COPIED) || (val == COPY_ERROR)) {
  2884. mutex_lock(&instance->crashdump_lock);
  2885. megasas_free_host_crash_buffer(instance);
  2886. mutex_unlock(&instance->crashdump_lock);
  2887. if (val == COPY_ERROR)
  2888. dev_info(&instance->pdev->dev, "application failed to "
  2889. "copy Firmware crash dump\n");
  2890. else
  2891. dev_info(&instance->pdev->dev, "Firmware crash dump "
  2892. "copied successfully\n");
  2893. }
  2894. return strlen(buf);
  2895. }
  2896. static ssize_t
  2897. fw_crash_state_show(struct device *cdev,
  2898. struct device_attribute *attr, char *buf)
  2899. {
  2900. struct Scsi_Host *shost = class_to_shost(cdev);
  2901. struct megasas_instance *instance =
  2902. (struct megasas_instance *) shost->hostdata;
  2903. return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
  2904. }
  2905. static ssize_t
  2906. page_size_show(struct device *cdev,
  2907. struct device_attribute *attr, char *buf)
  2908. {
  2909. return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
  2910. }
  2911. static ssize_t
  2912. ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
  2913. char *buf)
  2914. {
  2915. struct Scsi_Host *shost = class_to_shost(cdev);
  2916. struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
  2917. return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
  2918. }
  2919. static ssize_t
  2920. fw_cmds_outstanding_show(struct device *cdev,
  2921. struct device_attribute *attr, char *buf)
  2922. {
  2923. struct Scsi_Host *shost = class_to_shost(cdev);
  2924. struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
  2925. return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
  2926. }
  2927. static ssize_t
  2928. enable_sdev_max_qd_show(struct device *cdev,
  2929. struct device_attribute *attr, char *buf)
  2930. {
  2931. struct Scsi_Host *shost = class_to_shost(cdev);
  2932. struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
  2933. return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
  2934. }
  2935. static ssize_t
  2936. enable_sdev_max_qd_store(struct device *cdev,
  2937. struct device_attribute *attr, const char *buf, size_t count)
  2938. {
  2939. struct Scsi_Host *shost = class_to_shost(cdev);
  2940. struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
  2941. u32 val = 0;
  2942. bool is_target_prop;
  2943. int ret_target_prop = DCMD_FAILED;
  2944. struct scsi_device *sdev;
  2945. if (kstrtou32(buf, 0, &val) != 0) {
  2946. pr_err("megasas: could not set enable_sdev_max_qd\n");
  2947. return -EINVAL;
  2948. }
  2949. mutex_lock(&instance->reset_mutex);
  2950. if (val)
  2951. instance->enable_sdev_max_qd = true;
  2952. else
  2953. instance->enable_sdev_max_qd = false;
  2954. shost_for_each_device(sdev, shost) {
  2955. ret_target_prop = megasas_get_target_prop(instance, sdev);
  2956. is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
  2957. megasas_set_fw_assisted_qd(sdev, is_target_prop);
  2958. }
  2959. mutex_unlock(&instance->reset_mutex);
  2960. return strlen(buf);
  2961. }
  2962. static ssize_t
  2963. dump_system_regs_show(struct device *cdev,
  2964. struct device_attribute *attr, char *buf)
  2965. {
  2966. struct Scsi_Host *shost = class_to_shost(cdev);
  2967. struct megasas_instance *instance =
  2968. (struct megasas_instance *)shost->hostdata;
  2969. return megasas_dump_sys_regs(instance->reg_set, buf);
  2970. }
  2971. static ssize_t
  2972. raid_map_id_show(struct device *cdev, struct device_attribute *attr,
  2973. char *buf)
  2974. {
  2975. struct Scsi_Host *shost = class_to_shost(cdev);
  2976. struct megasas_instance *instance =
  2977. (struct megasas_instance *)shost->hostdata;
  2978. return snprintf(buf, PAGE_SIZE, "%ld\n",
  2979. (unsigned long)instance->map_id);
  2980. }
  2981. static DEVICE_ATTR_RW(fw_crash_buffer);
  2982. static DEVICE_ATTR_RO(fw_crash_buffer_size);
  2983. static DEVICE_ATTR_RW(fw_crash_state);
  2984. static DEVICE_ATTR_RO(page_size);
  2985. static DEVICE_ATTR_RO(ldio_outstanding);
  2986. static DEVICE_ATTR_RO(fw_cmds_outstanding);
  2987. static DEVICE_ATTR_RW(enable_sdev_max_qd);
  2988. static DEVICE_ATTR_RO(dump_system_regs);
  2989. static DEVICE_ATTR_RO(raid_map_id);
  2990. static struct attribute *megaraid_host_attrs[] = {
  2991. &dev_attr_fw_crash_buffer_size.attr,
  2992. &dev_attr_fw_crash_buffer.attr,
  2993. &dev_attr_fw_crash_state.attr,
  2994. &dev_attr_page_size.attr,
  2995. &dev_attr_ldio_outstanding.attr,
  2996. &dev_attr_fw_cmds_outstanding.attr,
  2997. &dev_attr_enable_sdev_max_qd.attr,
  2998. &dev_attr_dump_system_regs.attr,
  2999. &dev_attr_raid_map_id.attr,
  3000. NULL,
  3001. };
  3002. ATTRIBUTE_GROUPS(megaraid_host);
  3003. /*
  3004. * Scsi host template for megaraid_sas driver
  3005. */
  3006. static struct scsi_host_template megasas_template = {
  3007. .module = THIS_MODULE,
  3008. .name = "Avago SAS based MegaRAID driver",
  3009. .proc_name = "megaraid_sas",
  3010. .slave_configure = megasas_slave_configure,
  3011. .slave_alloc = megasas_slave_alloc,
  3012. .slave_destroy = megasas_slave_destroy,
  3013. .queuecommand = megasas_queue_command,
  3014. .eh_target_reset_handler = megasas_reset_target,
  3015. .eh_abort_handler = megasas_task_abort,
  3016. .eh_host_reset_handler = megasas_reset_bus_host,
  3017. .eh_timed_out = megasas_reset_timer,
  3018. .shost_groups = megaraid_host_groups,
  3019. .bios_param = megasas_bios_param,
  3020. .map_queues = megasas_map_queues,
  3021. .mq_poll = megasas_blk_mq_poll,
  3022. .change_queue_depth = scsi_change_queue_depth,
  3023. .max_segment_size = 0xffffffff,
  3024. .cmd_size = sizeof(struct megasas_cmd_priv),
  3025. };
  3026. /**
  3027. * megasas_complete_int_cmd - Completes an internal command
  3028. * @instance: Adapter soft state
  3029. * @cmd: Command to be completed
  3030. *
  3031. * The megasas_issue_blocked_cmd() function waits for a command to complete
  3032. * after it issues a command. This function wakes up that waiting routine by
  3033. * calling wake_up() on the wait queue.
  3034. */
  3035. static void
  3036. megasas_complete_int_cmd(struct megasas_instance *instance,
  3037. struct megasas_cmd *cmd)
  3038. {
  3039. if (cmd->cmd_status_drv == DCMD_INIT)
  3040. cmd->cmd_status_drv =
  3041. (cmd->frame->io.cmd_status == MFI_STAT_OK) ?
  3042. DCMD_SUCCESS : DCMD_FAILED;
  3043. wake_up(&instance->int_cmd_wait_q);
  3044. }
  3045. /**
  3046. * megasas_complete_abort - Completes aborting a command
  3047. * @instance: Adapter soft state
  3048. * @cmd: Cmd that was issued to abort another cmd
  3049. *
  3050. * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
  3051. * after it issues an abort on a previously issued command. This function
  3052. * wakes up all functions waiting on the same wait queue.
  3053. */
  3054. static void
  3055. megasas_complete_abort(struct megasas_instance *instance,
  3056. struct megasas_cmd *cmd)
  3057. {
  3058. if (cmd->sync_cmd) {
  3059. cmd->sync_cmd = 0;
  3060. cmd->cmd_status_drv = DCMD_SUCCESS;
  3061. wake_up(&instance->abort_cmd_wait_q);
  3062. }
  3063. }
  3064. static void
  3065. megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
  3066. {
  3067. uint i;
  3068. for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
  3069. if (instance->ld_ids_prev[i] != 0xff &&
  3070. instance->ld_ids_from_raidmap[i] == 0xff) {
  3071. if (megasas_dbg_lvl & LD_PD_DEBUG)
  3072. dev_info(&instance->pdev->dev,
  3073. "LD target ID %d removed from RAID map\n", i);
  3074. instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
  3075. }
  3076. }
  3077. }
  3078. /**
  3079. * megasas_complete_cmd - Completes a command
  3080. * @instance: Adapter soft state
  3081. * @cmd: Command to be completed
  3082. * @alt_status: If non-zero, use this value as status to
  3083. * SCSI mid-layer instead of the value returned
  3084. * by the FW. This should be used if caller wants
  3085. * an alternate status (as in the case of aborted
  3086. * commands)
  3087. */
  3088. void
  3089. megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
  3090. u8 alt_status)
  3091. {
  3092. int exception = 0;
  3093. struct megasas_header *hdr = &cmd->frame->hdr;
  3094. unsigned long flags;
  3095. struct fusion_context *fusion = instance->ctrl_context;
  3096. u32 opcode, status;
  3097. /* flag for the retry reset */
  3098. cmd->retry_for_fw_reset = 0;
  3099. if (cmd->scmd)
  3100. megasas_priv(cmd->scmd)->cmd_priv = NULL;
  3101. switch (hdr->cmd) {
  3102. case MFI_CMD_INVALID:
  3103. /* Some older 1068 controller FW may keep a pended
  3104. MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
  3105. when booting the kdump kernel. Ignore this command to
  3106. prevent a kernel panic on shutdown of the kdump kernel. */
  3107. dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
  3108. "completed\n");
  3109. dev_warn(&instance->pdev->dev, "If you have a controller "
  3110. "other than PERC5, please upgrade your firmware\n");
  3111. break;
  3112. case MFI_CMD_PD_SCSI_IO:
  3113. case MFI_CMD_LD_SCSI_IO:
  3114. /*
  3115. * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
  3116. * issued either through an IO path or an IOCTL path. If it
  3117. * was via IOCTL, we will send it to internal completion.
  3118. */
  3119. if (cmd->sync_cmd) {
  3120. cmd->sync_cmd = 0;
  3121. megasas_complete_int_cmd(instance, cmd);
  3122. break;
  3123. }
  3124. fallthrough;
  3125. case MFI_CMD_LD_READ:
  3126. case MFI_CMD_LD_WRITE:
  3127. if (alt_status) {
  3128. cmd->scmd->result = alt_status << 16;
  3129. exception = 1;
  3130. }
  3131. if (exception) {
  3132. atomic_dec(&instance->fw_outstanding);
  3133. scsi_dma_unmap(cmd->scmd);
  3134. scsi_done(cmd->scmd);
  3135. megasas_return_cmd(instance, cmd);
  3136. break;
  3137. }
  3138. switch (hdr->cmd_status) {
  3139. case MFI_STAT_OK:
  3140. cmd->scmd->result = DID_OK << 16;
  3141. break;
  3142. case MFI_STAT_SCSI_IO_FAILED:
  3143. case MFI_STAT_LD_INIT_IN_PROGRESS:
  3144. cmd->scmd->result =
  3145. (DID_ERROR << 16) | hdr->scsi_status;
  3146. break;
  3147. case MFI_STAT_SCSI_DONE_WITH_ERROR:
  3148. cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
  3149. if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
  3150. memset(cmd->scmd->sense_buffer, 0,
  3151. SCSI_SENSE_BUFFERSIZE);
  3152. memcpy(cmd->scmd->sense_buffer, cmd->sense,
  3153. hdr->sense_len);
  3154. }
  3155. break;
  3156. case MFI_STAT_LD_OFFLINE:
  3157. case MFI_STAT_DEVICE_NOT_FOUND:
  3158. cmd->scmd->result = DID_BAD_TARGET << 16;
  3159. break;
  3160. default:
  3161. dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
  3162. hdr->cmd_status);
  3163. cmd->scmd->result = DID_ERROR << 16;
  3164. break;
  3165. }
  3166. atomic_dec(&instance->fw_outstanding);
  3167. scsi_dma_unmap(cmd->scmd);
  3168. scsi_done(cmd->scmd);
  3169. megasas_return_cmd(instance, cmd);
  3170. break;
  3171. case MFI_CMD_SMP:
  3172. case MFI_CMD_STP:
  3173. case MFI_CMD_NVME:
  3174. case MFI_CMD_TOOLBOX:
  3175. megasas_complete_int_cmd(instance, cmd);
  3176. break;
  3177. case MFI_CMD_DCMD:
  3178. opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
  3179. /* Check for LD map update */
  3180. if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
  3181. && (cmd->frame->dcmd.mbox.b[1] == 1)) {
  3182. fusion->fast_path_io = 0;
  3183. spin_lock_irqsave(instance->host->host_lock, flags);
  3184. status = cmd->frame->hdr.cmd_status;
  3185. instance->map_update_cmd = NULL;
  3186. if (status != MFI_STAT_OK) {
  3187. if (status != MFI_STAT_NOT_FOUND)
  3188. dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
  3189. cmd->frame->hdr.cmd_status);
  3190. else {
  3191. megasas_return_cmd(instance, cmd);
  3192. spin_unlock_irqrestore(
  3193. instance->host->host_lock,
  3194. flags);
  3195. break;
  3196. }
  3197. }
  3198. megasas_return_cmd(instance, cmd);
  3199. /*
  3200. * Set fast path IO to ZERO.
  3201. * Validate Map will set proper value.
  3202. * Meanwhile all IOs will go as LD IO.
  3203. */
  3204. if (status == MFI_STAT_OK &&
  3205. (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
  3206. instance->map_id++;
  3207. fusion->fast_path_io = 1;
  3208. } else {
  3209. fusion->fast_path_io = 0;
  3210. }
  3211. if (instance->adapter_type >= INVADER_SERIES)
  3212. megasas_set_ld_removed_by_fw(instance);
  3213. megasas_sync_map_info(instance);
  3214. spin_unlock_irqrestore(instance->host->host_lock,
  3215. flags);
  3216. break;
  3217. }
  3218. if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
  3219. opcode == MR_DCMD_CTRL_EVENT_GET) {
  3220. spin_lock_irqsave(&poll_aen_lock, flags);
  3221. megasas_poll_wait_aen = 0;
  3222. spin_unlock_irqrestore(&poll_aen_lock, flags);
  3223. }
  3224. /* FW has an updated PD sequence */
  3225. if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
  3226. (cmd->frame->dcmd.mbox.b[0] == 1)) {
  3227. spin_lock_irqsave(instance->host->host_lock, flags);
  3228. status = cmd->frame->hdr.cmd_status;
  3229. instance->jbod_seq_cmd = NULL;
  3230. megasas_return_cmd(instance, cmd);
  3231. if (status == MFI_STAT_OK) {
  3232. instance->pd_seq_map_id++;
  3233. /* Re-register a pd sync seq num cmd */
  3234. if (megasas_sync_pd_seq_num(instance, true))
  3235. instance->use_seqnum_jbod_fp = false;
  3236. } else
  3237. instance->use_seqnum_jbod_fp = false;
  3238. spin_unlock_irqrestore(instance->host->host_lock, flags);
  3239. break;
  3240. }
  3241. /*
  3242. * See if got an event notification
  3243. */
  3244. if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
  3245. megasas_service_aen(instance, cmd);
  3246. else
  3247. megasas_complete_int_cmd(instance, cmd);
  3248. break;
  3249. case MFI_CMD_ABORT:
  3250. /*
  3251. * Cmd issued to abort another cmd returned
  3252. */
  3253. megasas_complete_abort(instance, cmd);
  3254. break;
  3255. default:
  3256. dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
  3257. hdr->cmd);
  3258. megasas_complete_int_cmd(instance, cmd);
  3259. break;
  3260. }
  3261. }
  3262. /**
  3263. * megasas_issue_pending_cmds_again - issue all pending cmds
  3264. * in FW again because of the fw reset
  3265. * @instance: Adapter soft state
  3266. */
  3267. static inline void
  3268. megasas_issue_pending_cmds_again(struct megasas_instance *instance)
  3269. {
  3270. struct megasas_cmd *cmd;
  3271. struct list_head clist_local;
  3272. union megasas_evt_class_locale class_locale;
  3273. unsigned long flags;
  3274. u32 seq_num;
  3275. INIT_LIST_HEAD(&clist_local);
  3276. spin_lock_irqsave(&instance->hba_lock, flags);
  3277. list_splice_init(&instance->internal_reset_pending_q, &clist_local);
  3278. spin_unlock_irqrestore(&instance->hba_lock, flags);
  3279. while (!list_empty(&clist_local)) {
  3280. cmd = list_entry((&clist_local)->next,
  3281. struct megasas_cmd, list);
  3282. list_del_init(&cmd->list);
  3283. if (cmd->sync_cmd || cmd->scmd) {
  3284. dev_notice(&instance->pdev->dev, "command %p, %p:%d"
  3285. "detected to be pending while HBA reset\n",
  3286. cmd, cmd->scmd, cmd->sync_cmd);
  3287. cmd->retry_for_fw_reset++;
  3288. if (cmd->retry_for_fw_reset == 3) {
  3289. dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
  3290. "was tried multiple times during reset."
  3291. "Shutting down the HBA\n",
  3292. cmd, cmd->scmd, cmd->sync_cmd);
  3293. instance->instancet->disable_intr(instance);
  3294. atomic_set(&instance->fw_reset_no_pci_access, 1);
  3295. megaraid_sas_kill_hba(instance);
  3296. return;
  3297. }
  3298. }
  3299. if (cmd->sync_cmd == 1) {
  3300. if (cmd->scmd) {
  3301. dev_notice(&instance->pdev->dev, "unexpected"
  3302. "cmd attached to internal command!\n");
  3303. }
  3304. dev_notice(&instance->pdev->dev, "%p synchronous cmd"
  3305. "on the internal reset queue,"
  3306. "issue it again.\n", cmd);
  3307. cmd->cmd_status_drv = DCMD_INIT;
  3308. instance->instancet->fire_cmd(instance,
  3309. cmd->frame_phys_addr,
  3310. 0, instance->reg_set);
  3311. } else if (cmd->scmd) {
  3312. dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
  3313. "detected on the internal queue, issue again.\n",
  3314. cmd, cmd->scmd->cmnd[0]);
  3315. atomic_inc(&instance->fw_outstanding);
  3316. instance->instancet->fire_cmd(instance,
  3317. cmd->frame_phys_addr,
  3318. cmd->frame_count-1, instance->reg_set);
  3319. } else {
  3320. dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
  3321. "internal reset defer list while re-issue!!\n",
  3322. cmd);
  3323. }
  3324. }
  3325. if (instance->aen_cmd) {
  3326. dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
  3327. megasas_return_cmd(instance, instance->aen_cmd);
  3328. instance->aen_cmd = NULL;
  3329. }
  3330. /*
  3331. * Initiate AEN (Asynchronous Event Notification)
  3332. */
  3333. seq_num = instance->last_seq_num;
  3334. class_locale.members.reserved = 0;
  3335. class_locale.members.locale = MR_EVT_LOCALE_ALL;
  3336. class_locale.members.class = MR_EVT_CLASS_DEBUG;
  3337. megasas_register_aen(instance, seq_num, class_locale.word);
  3338. }
  3339. /*
  3340. * Move the internal reset pending commands to a deferred queue.
  3341. *
  3342. * We move the commands pending at internal reset time to a
  3343. * pending queue. This queue would be flushed after successful
  3344. * completion of the internal reset sequence. if the internal reset
  3345. * did not complete in time, the kernel reset handler would flush
  3346. * these commands.
  3347. */
  3348. static void
  3349. megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
  3350. {
  3351. struct megasas_cmd *cmd;
  3352. int i;
  3353. u16 max_cmd = instance->max_fw_cmds;
  3354. u32 defer_index;
  3355. unsigned long flags;
  3356. defer_index = 0;
  3357. spin_lock_irqsave(&instance->mfi_pool_lock, flags);
  3358. for (i = 0; i < max_cmd; i++) {
  3359. cmd = instance->cmd_list[i];
  3360. if (cmd->sync_cmd == 1 || cmd->scmd) {
  3361. dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
  3362. "on the defer queue as internal\n",
  3363. defer_index, cmd, cmd->sync_cmd, cmd->scmd);
  3364. if (!list_empty(&cmd->list)) {
  3365. dev_notice(&instance->pdev->dev, "ERROR while"
  3366. " moving this cmd:%p, %d %p, it was"
  3367. "discovered on some list?\n",
  3368. cmd, cmd->sync_cmd, cmd->scmd);
  3369. list_del_init(&cmd->list);
  3370. }
  3371. defer_index++;
  3372. list_add_tail(&cmd->list,
  3373. &instance->internal_reset_pending_q);
  3374. }
  3375. }
  3376. spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
  3377. }
  3378. static void
  3379. process_fw_state_change_wq(struct work_struct *work)
  3380. {
  3381. struct megasas_instance *instance =
  3382. container_of(work, struct megasas_instance, work_init);
  3383. u32 wait;
  3384. unsigned long flags;
  3385. if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
  3386. dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
  3387. atomic_read(&instance->adprecovery));
  3388. return ;
  3389. }
  3390. if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
  3391. dev_notice(&instance->pdev->dev, "FW detected to be in fault"
  3392. "state, restarting it...\n");
  3393. instance->instancet->disable_intr(instance);
  3394. atomic_set(&instance->fw_outstanding, 0);
  3395. atomic_set(&instance->fw_reset_no_pci_access, 1);
  3396. instance->instancet->adp_reset(instance, instance->reg_set);
  3397. atomic_set(&instance->fw_reset_no_pci_access, 0);
  3398. dev_notice(&instance->pdev->dev, "FW restarted successfully,"
  3399. "initiating next stage...\n");
  3400. dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
  3401. "state 2 starting...\n");
  3402. /* waiting for about 20 second before start the second init */
  3403. for (wait = 0; wait < 30; wait++) {
  3404. msleep(1000);
  3405. }
  3406. if (megasas_transition_to_ready(instance, 1)) {
  3407. dev_notice(&instance->pdev->dev, "adapter not ready\n");
  3408. atomic_set(&instance->fw_reset_no_pci_access, 1);
  3409. megaraid_sas_kill_hba(instance);
  3410. return ;
  3411. }
  3412. if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
  3413. (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
  3414. (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
  3415. ) {
  3416. *instance->consumer = *instance->producer;
  3417. } else {
  3418. *instance->consumer = 0;
  3419. *instance->producer = 0;
  3420. }
  3421. megasas_issue_init_mfi(instance);
  3422. spin_lock_irqsave(&instance->hba_lock, flags);
  3423. atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
  3424. spin_unlock_irqrestore(&instance->hba_lock, flags);
  3425. instance->instancet->enable_intr(instance);
  3426. megasas_issue_pending_cmds_again(instance);
  3427. instance->issuepend_done = 1;
  3428. }
  3429. }
  3430. /**
  3431. * megasas_deplete_reply_queue - Processes all completed commands
  3432. * @instance: Adapter soft state
  3433. * @alt_status: Alternate status to be returned to
  3434. * SCSI mid-layer instead of the status
  3435. * returned by the FW
  3436. * Note: this must be called with hba lock held
  3437. */
  3438. static int
  3439. megasas_deplete_reply_queue(struct megasas_instance *instance,
  3440. u8 alt_status)
  3441. {
  3442. u32 mfiStatus;
  3443. u32 fw_state;
  3444. if (instance->instancet->check_reset(instance, instance->reg_set) == 1)
  3445. return IRQ_HANDLED;
  3446. mfiStatus = instance->instancet->clear_intr(instance);
  3447. if (mfiStatus == 0) {
  3448. /* Hardware may not set outbound_intr_status in MSI-X mode */
  3449. if (!instance->msix_vectors)
  3450. return IRQ_NONE;
  3451. }
  3452. instance->mfiStatus = mfiStatus;
  3453. if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
  3454. fw_state = instance->instancet->read_fw_status_reg(
  3455. instance) & MFI_STATE_MASK;
  3456. if (fw_state != MFI_STATE_FAULT) {
  3457. dev_notice(&instance->pdev->dev, "fw state:%x\n",
  3458. fw_state);
  3459. }
  3460. if ((fw_state == MFI_STATE_FAULT) &&
  3461. (instance->disableOnlineCtrlReset == 0)) {
  3462. dev_notice(&instance->pdev->dev, "wait adp restart\n");
  3463. if ((instance->pdev->device ==
  3464. PCI_DEVICE_ID_LSI_SAS1064R) ||
  3465. (instance->pdev->device ==
  3466. PCI_DEVICE_ID_DELL_PERC5) ||
  3467. (instance->pdev->device ==
  3468. PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
  3469. *instance->consumer =
  3470. cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
  3471. }
  3472. instance->instancet->disable_intr(instance);
  3473. atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
  3474. instance->issuepend_done = 0;
  3475. atomic_set(&instance->fw_outstanding, 0);
  3476. megasas_internal_reset_defer_cmds(instance);
  3477. dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
  3478. fw_state, atomic_read(&instance->adprecovery));
  3479. schedule_work(&instance->work_init);
  3480. return IRQ_HANDLED;
  3481. } else {
  3482. dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
  3483. fw_state, instance->disableOnlineCtrlReset);
  3484. }
  3485. }
  3486. tasklet_schedule(&instance->isr_tasklet);
  3487. return IRQ_HANDLED;
  3488. }
  3489. /**
  3490. * megasas_isr - isr entry point
  3491. * @irq: IRQ number
  3492. * @devp: IRQ context address
  3493. */
  3494. static irqreturn_t megasas_isr(int irq, void *devp)
  3495. {
  3496. struct megasas_irq_context *irq_context = devp;
  3497. struct megasas_instance *instance = irq_context->instance;
  3498. unsigned long flags;
  3499. irqreturn_t rc;
  3500. if (atomic_read(&instance->fw_reset_no_pci_access))
  3501. return IRQ_HANDLED;
  3502. spin_lock_irqsave(&instance->hba_lock, flags);
  3503. rc = megasas_deplete_reply_queue(instance, DID_OK);
  3504. spin_unlock_irqrestore(&instance->hba_lock, flags);
  3505. return rc;
  3506. }
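/*
 * Illustrative sketch, not part of the driver: the dev_id cookie handed to
 * request_irq() is a struct megasas_irq_context, which is how megasas_isr()
 * above recovers the adapter instance from 'devp'. The actual registration
 * lives in megasas_setup_irqs_ioapic()/megasas_setup_irqs_msix() further
 * down; the handler, flags and error handling below are assumptions made
 * only for illustration.
 *
 *	struct megasas_irq_context *ctx = &instance->irq_context[0];
 *
 *	ctx->instance = instance;
 *	ctx->MSIxIndex = 0;
 *	if (request_irq(pci_irq_vector(instance->pdev, 0), megasas_isr,
 *			IRQF_SHARED, "megasas", ctx))
 *		return -EBUSY;		hypothetical error handling
 */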
  3507. /**
  3508. * megasas_transition_to_ready - Move the FW to READY state
  3509. * @instance: Adapter soft state
  3510. * @ocr: Adapter reset state
  3511. *
3512. * During initialization, the FW can be in any one of several possible
3513. * states. If the FW is in the operational or waiting-for-handshake states,
3514. * the driver must take steps to bring it to the ready state. Otherwise, it
3515. * simply waits for the FW to reach the ready state.
  3516. */
  3517. int
  3518. megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
  3519. {
  3520. int i;
  3521. u8 max_wait;
  3522. u32 fw_state;
  3523. u32 abs_state, curr_abs_state;
  3524. abs_state = instance->instancet->read_fw_status_reg(instance);
  3525. fw_state = abs_state & MFI_STATE_MASK;
  3526. if (fw_state != MFI_STATE_READY)
  3527. dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
  3528. " state\n");
  3529. while (fw_state != MFI_STATE_READY) {
  3530. switch (fw_state) {
  3531. case MFI_STATE_FAULT:
  3532. dev_printk(KERN_ERR, &instance->pdev->dev,
  3533. "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
  3534. abs_state & MFI_STATE_FAULT_CODE,
  3535. abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
  3536. if (ocr) {
  3537. max_wait = MEGASAS_RESET_WAIT_TIME;
  3538. break;
  3539. } else {
  3540. dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
  3541. megasas_dump_reg_set(instance->reg_set);
  3542. return -ENODEV;
  3543. }
  3544. case MFI_STATE_WAIT_HANDSHAKE:
  3545. /*
  3546. * Set the CLR bit in inbound doorbell
  3547. */
  3548. if ((instance->pdev->device ==
  3549. PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
  3550. (instance->pdev->device ==
  3551. PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
  3552. (instance->adapter_type != MFI_SERIES))
  3553. writel(
  3554. MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
  3555. &instance->reg_set->doorbell);
  3556. else
  3557. writel(
  3558. MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
  3559. &instance->reg_set->inbound_doorbell);
  3560. max_wait = MEGASAS_RESET_WAIT_TIME;
  3561. break;
  3562. case MFI_STATE_BOOT_MESSAGE_PENDING:
  3563. if ((instance->pdev->device ==
  3564. PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
  3565. (instance->pdev->device ==
  3566. PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
  3567. (instance->adapter_type != MFI_SERIES))
  3568. writel(MFI_INIT_HOTPLUG,
  3569. &instance->reg_set->doorbell);
  3570. else
  3571. writel(MFI_INIT_HOTPLUG,
  3572. &instance->reg_set->inbound_doorbell);
  3573. max_wait = MEGASAS_RESET_WAIT_TIME;
  3574. break;
  3575. case MFI_STATE_OPERATIONAL:
  3576. /*
  3577. * Bring it to READY state; assuming max wait 10 secs
  3578. */
  3579. instance->instancet->disable_intr(instance);
  3580. if ((instance->pdev->device ==
  3581. PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
  3582. (instance->pdev->device ==
  3583. PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
  3584. (instance->adapter_type != MFI_SERIES)) {
  3585. writel(MFI_RESET_FLAGS,
  3586. &instance->reg_set->doorbell);
  3587. if (instance->adapter_type != MFI_SERIES) {
  3588. for (i = 0; i < (10 * 1000); i += 20) {
  3589. if (megasas_readl(
  3590. instance,
  3591. &instance->
  3592. reg_set->
  3593. doorbell) & 1)
  3594. msleep(20);
  3595. else
  3596. break;
  3597. }
  3598. }
  3599. } else
  3600. writel(MFI_RESET_FLAGS,
  3601. &instance->reg_set->inbound_doorbell);
  3602. max_wait = MEGASAS_RESET_WAIT_TIME;
  3603. break;
  3604. case MFI_STATE_UNDEFINED:
  3605. /*
  3606. * This state should not last for more than 2 seconds
  3607. */
  3608. max_wait = MEGASAS_RESET_WAIT_TIME;
  3609. break;
  3610. case MFI_STATE_BB_INIT:
  3611. max_wait = MEGASAS_RESET_WAIT_TIME;
  3612. break;
  3613. case MFI_STATE_FW_INIT:
  3614. max_wait = MEGASAS_RESET_WAIT_TIME;
  3615. break;
  3616. case MFI_STATE_FW_INIT_2:
  3617. max_wait = MEGASAS_RESET_WAIT_TIME;
  3618. break;
  3619. case MFI_STATE_DEVICE_SCAN:
  3620. max_wait = MEGASAS_RESET_WAIT_TIME;
  3621. break;
  3622. case MFI_STATE_FLUSH_CACHE:
  3623. max_wait = MEGASAS_RESET_WAIT_TIME;
  3624. break;
  3625. default:
  3626. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
  3627. fw_state);
  3628. dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
  3629. megasas_dump_reg_set(instance->reg_set);
  3630. return -ENODEV;
  3631. }
  3632. /*
  3633. * The cur_state should not last for more than max_wait secs
  3634. */
  3635. for (i = 0; i < max_wait * 50; i++) {
  3636. curr_abs_state = instance->instancet->
  3637. read_fw_status_reg(instance);
  3638. if (abs_state == curr_abs_state) {
  3639. msleep(20);
  3640. } else
  3641. break;
  3642. }
  3643. /*
  3644. * Return error if fw_state hasn't changed after max_wait
  3645. */
  3646. if (curr_abs_state == abs_state) {
  3647. dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
  3648. "in %d secs\n", fw_state, max_wait);
  3649. dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
  3650. megasas_dump_reg_set(instance->reg_set);
  3651. return -ENODEV;
  3652. }
  3653. abs_state = curr_abs_state;
  3654. fw_state = curr_abs_state & MFI_STATE_MASK;
  3655. }
  3656. dev_info(&instance->pdev->dev, "FW now in Ready state\n");
  3657. return 0;
  3658. }
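/*
 * Illustrative sketch, not part of the driver: the state-polling idiom used
 * above. Each pass of the outer loop picks a max_wait for the current state,
 * then spins in 20 ms steps (max_wait * 50 iterations) until the absolute
 * status register changes; fw_state is just the MFI_STATE_MASK bits of it.
 *
 *	abs_state = instance->instancet->read_fw_status_reg(instance);
 *	fw_state  = abs_state & MFI_STATE_MASK;
 *	for (i = 0; i < max_wait * 50; i++) {
 *		curr_abs_state = instance->instancet->read_fw_status_reg(instance);
 *		if (curr_abs_state != abs_state)
 *			break;		FW moved to the next state
 *		msleep(20);
 *	}
 */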
  3659. /**
  3660. * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
  3661. * @instance: Adapter soft state
  3662. */
  3663. static void megasas_teardown_frame_pool(struct megasas_instance *instance)
  3664. {
  3665. int i;
  3666. u16 max_cmd = instance->max_mfi_cmds;
  3667. struct megasas_cmd *cmd;
  3668. if (!instance->frame_dma_pool)
  3669. return;
  3670. /*
  3671. * Return all frames to pool
  3672. */
  3673. for (i = 0; i < max_cmd; i++) {
  3674. cmd = instance->cmd_list[i];
  3675. if (cmd->frame)
  3676. dma_pool_free(instance->frame_dma_pool, cmd->frame,
  3677. cmd->frame_phys_addr);
  3678. if (cmd->sense)
  3679. dma_pool_free(instance->sense_dma_pool, cmd->sense,
  3680. cmd->sense_phys_addr);
  3681. }
  3682. /*
  3683. * Now destroy the pool itself
  3684. */
  3685. dma_pool_destroy(instance->frame_dma_pool);
  3686. dma_pool_destroy(instance->sense_dma_pool);
  3687. instance->frame_dma_pool = NULL;
  3688. instance->sense_dma_pool = NULL;
  3689. }
  3690. /**
  3691. * megasas_create_frame_pool - Creates DMA pool for cmd frames
  3692. * @instance: Adapter soft state
  3693. *
  3694. * Each command packet has an embedded DMA memory buffer that is used for
  3695. * filling MFI frame and the SG list that immediately follows the frame. This
  3696. * function creates those DMA memory buffers for each command packet by using
  3697. * PCI pool facility.
  3698. */
  3699. static int megasas_create_frame_pool(struct megasas_instance *instance)
  3700. {
  3701. int i;
  3702. u16 max_cmd;
  3703. u32 frame_count;
  3704. struct megasas_cmd *cmd;
  3705. max_cmd = instance->max_mfi_cmds;
  3706. /*
  3707. * For MFI controllers.
  3708. * max_num_sge = 60
3709. * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
3710. * Total 960 bytes (15 MFI frames of 64 bytes each)
3711. *
3712. * Fusion adapters require only 3 extra frames.
3713. * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3714. * max_sge_sz = 12 bytes (sizeof megasas_sge64)
3715. * Total 192 bytes (3 MFI frames of 64 bytes each)
  3716. */
  3717. frame_count = (instance->adapter_type == MFI_SERIES) ?
  3718. (15 + 1) : (3 + 1);
  3719. instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
  3720. /*
  3721. * Use DMA pool facility provided by PCI layer
  3722. */
  3723. instance->frame_dma_pool = dma_pool_create("megasas frame pool",
  3724. &instance->pdev->dev,
  3725. instance->mfi_frame_size, 256, 0);
  3726. if (!instance->frame_dma_pool) {
  3727. dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
  3728. return -ENOMEM;
  3729. }
  3730. instance->sense_dma_pool = dma_pool_create("megasas sense pool",
  3731. &instance->pdev->dev, 128,
  3732. 4, 0);
  3733. if (!instance->sense_dma_pool) {
  3734. dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
  3735. dma_pool_destroy(instance->frame_dma_pool);
  3736. instance->frame_dma_pool = NULL;
  3737. return -ENOMEM;
  3738. }
  3739. /*
  3740. * Allocate and attach a frame to each of the commands in cmd_list.
3741. * By using cmd->index as the context instead of &cmd, we can
3742. * always use a 32-bit context regardless of the architecture.
  3743. */
  3744. for (i = 0; i < max_cmd; i++) {
  3745. cmd = instance->cmd_list[i];
  3746. cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
  3747. GFP_KERNEL, &cmd->frame_phys_addr);
  3748. cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
  3749. GFP_KERNEL, &cmd->sense_phys_addr);
  3750. /*
  3751. * megasas_teardown_frame_pool() takes care of freeing
  3752. * whatever has been allocated
  3753. */
  3754. if (!cmd->frame || !cmd->sense) {
  3755. dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
  3756. megasas_teardown_frame_pool(instance);
  3757. return -ENOMEM;
  3758. }
  3759. cmd->frame->io.context = cpu_to_le32(cmd->index);
  3760. cmd->frame->io.pad_0 = 0;
  3761. if ((instance->adapter_type == MFI_SERIES) && reset_devices)
  3762. cmd->frame->hdr.cmd = MFI_CMD_INVALID;
  3763. }
  3764. return 0;
  3765. }
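/*
 * Illustrative sketch, not part of the driver: the dma_pool lifecycle used by
 * megasas_create_frame_pool()/megasas_teardown_frame_pool() above, reduced to
 * a single buffer. The pool name and the 'pdev' variable are placeholders.
 *
 *	struct dma_pool *pool;
 *	void *frame;
 *	dma_addr_t frame_h;
 *
 *	pool = dma_pool_create("example frame pool", &pdev->dev,
 *			       instance->mfi_frame_size, 256, 0);
 *	frame = dma_pool_zalloc(pool, GFP_KERNEL, &frame_h);
 *	...					hand frame_h to the FW
 *	dma_pool_free(pool, frame, frame_h);
 *	dma_pool_destroy(pool);
 */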
  3766. /**
  3767. * megasas_free_cmds - Free all the cmds in the free cmd pool
  3768. * @instance: Adapter soft state
  3769. */
  3770. void megasas_free_cmds(struct megasas_instance *instance)
  3771. {
  3772. int i;
  3773. /* First free the MFI frame pool */
  3774. megasas_teardown_frame_pool(instance);
  3775. /* Free all the commands in the cmd_list */
  3776. for (i = 0; i < instance->max_mfi_cmds; i++)
  3777. kfree(instance->cmd_list[i]);
  3778. /* Free the cmd_list buffer itself */
  3779. kfree(instance->cmd_list);
  3780. instance->cmd_list = NULL;
  3781. INIT_LIST_HEAD(&instance->cmd_pool);
  3782. }
  3783. /**
  3784. * megasas_alloc_cmds - Allocates the command packets
  3785. * @instance: Adapter soft state
  3786. *
3787. * Each command that is issued to the FW, whether an IO command from the OS or
3788. * an internal command like an IOCTL, is wrapped in a local data structure called
  3789. * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
  3790. * the FW.
  3791. *
  3792. * Each frame has a 32-bit field called context (tag). This context is used
  3793. * to get back the megasas_cmd from the frame when a frame gets completed in
  3794. * the ISR. Typically the address of the megasas_cmd itself would be used as
  3795. * the context. But we wanted to keep the differences between 32 and 64 bit
3796. * systems to a minimum. We always use 32-bit integers for the context. In
  3797. * this driver, the 32 bit values are the indices into an array cmd_list.
  3798. * This array is used only to look up the megasas_cmd given the context. The
  3799. * free commands themselves are maintained in a linked list called cmd_pool.
  3800. */
  3801. int megasas_alloc_cmds(struct megasas_instance *instance)
  3802. {
  3803. int i;
  3804. int j;
  3805. u16 max_cmd;
  3806. struct megasas_cmd *cmd;
  3807. max_cmd = instance->max_mfi_cmds;
  3808. /*
  3809. * instance->cmd_list is an array of struct megasas_cmd pointers.
  3810. * Allocate the dynamic array first and then allocate individual
  3811. * commands.
  3812. */
  3813. instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
  3814. if (!instance->cmd_list) {
  3815. dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
  3816. return -ENOMEM;
  3817. }
  3818. for (i = 0; i < max_cmd; i++) {
  3819. instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
  3820. GFP_KERNEL);
  3821. if (!instance->cmd_list[i]) {
  3822. for (j = 0; j < i; j++)
  3823. kfree(instance->cmd_list[j]);
  3824. kfree(instance->cmd_list);
  3825. instance->cmd_list = NULL;
  3826. return -ENOMEM;
  3827. }
  3828. }
  3829. for (i = 0; i < max_cmd; i++) {
  3830. cmd = instance->cmd_list[i];
  3831. memset(cmd, 0, sizeof(struct megasas_cmd));
  3832. cmd->index = i;
  3833. cmd->scmd = NULL;
  3834. cmd->instance = instance;
  3835. list_add_tail(&cmd->list, &instance->cmd_pool);
  3836. }
  3837. /*
  3838. * Create a frame pool and assign one frame to each cmd
  3839. */
  3840. if (megasas_create_frame_pool(instance)) {
  3841. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
  3842. megasas_free_cmds(instance);
  3843. return -ENOMEM;
  3844. }
  3845. return 0;
  3846. }
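/*
 * Illustrative sketch, not part of the driver: because every frame carries
 * cmd->index as its 32-bit context, completion code can map a context value
 * reported by the FW straight back to the owning megasas_cmd. The variable
 * 'context' below is a placeholder for whatever the reply path extracted.
 *
 *	struct megasas_cmd *cmd = instance->cmd_list[context];
 */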
  3847. /*
  3848. * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
  3849. * @instance: Adapter soft state
  3850. *
3851. * Returns INITIATE_OCR only for Fusion adapters when driver load/unload is
3852. * not in progress and OCR is not blocked; otherwise KILL_ADAPTER (MFI) or IGNORE_TIMEOUT.
  3853. */
  3854. inline int
  3855. dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
  3856. if (instance->adapter_type == MFI_SERIES)
  3857. return KILL_ADAPTER;
  3858. else if (instance->unload ||
  3859. test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
  3860. &instance->reset_flags))
  3861. return IGNORE_TIMEOUT;
  3862. else
  3863. return INITIATE_OCR;
  3864. }
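/*
 * Illustrative sketch, not part of the driver: the pattern every DCMD issuer
 * below follows when megasas_issue_blocked_cmd()/megasas_issue_polled()
 * reports DCMD_TIMEOUT.
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		mutex_unlock(&instance->reset_mutex);
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		mutex_lock(&instance->reset_mutex);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}
 */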
  3865. static void
  3866. megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
  3867. {
  3868. int ret;
  3869. struct megasas_cmd *cmd;
  3870. struct megasas_dcmd_frame *dcmd;
  3871. struct MR_PRIV_DEVICE *mr_device_priv_data;
  3872. u16 device_id = 0;
  3873. device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
  3874. cmd = megasas_get_cmd(instance);
  3875. if (!cmd) {
  3876. dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
  3877. return;
  3878. }
  3879. dcmd = &cmd->frame->dcmd;
  3880. memset(instance->pd_info, 0, sizeof(*instance->pd_info));
  3881. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  3882. dcmd->mbox.s[0] = cpu_to_le16(device_id);
  3883. dcmd->cmd = MFI_CMD_DCMD;
  3884. dcmd->cmd_status = 0xFF;
  3885. dcmd->sge_count = 1;
  3886. dcmd->flags = MFI_FRAME_DIR_READ;
  3887. dcmd->timeout = 0;
  3888. dcmd->pad_0 = 0;
  3889. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
  3890. dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
  3891. megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
  3892. sizeof(struct MR_PD_INFO));
  3893. if ((instance->adapter_type != MFI_SERIES) &&
  3894. !instance->mask_interrupts)
  3895. ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
  3896. else
  3897. ret = megasas_issue_polled(instance, cmd);
  3898. switch (ret) {
  3899. case DCMD_SUCCESS:
  3900. mr_device_priv_data = sdev->hostdata;
  3901. le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
  3902. mr_device_priv_data->interface_type =
  3903. instance->pd_info->state.ddf.pdType.intf;
  3904. break;
  3905. case DCMD_TIMEOUT:
  3906. switch (dcmd_timeout_ocr_possible(instance)) {
  3907. case INITIATE_OCR:
  3908. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  3909. mutex_unlock(&instance->reset_mutex);
  3910. megasas_reset_fusion(instance->host,
  3911. MFI_IO_TIMEOUT_OCR);
  3912. mutex_lock(&instance->reset_mutex);
  3913. break;
  3914. case KILL_ADAPTER:
  3915. megaraid_sas_kill_hba(instance);
  3916. break;
  3917. case IGNORE_TIMEOUT:
  3918. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  3919. __func__, __LINE__);
  3920. break;
  3921. }
  3922. break;
  3923. }
  3924. if (ret != DCMD_TIMEOUT)
  3925. megasas_return_cmd(instance, cmd);
  3926. return;
  3927. }
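/*
 * Illustrative sketch, not part of the driver: the issue-path selection that
 * megasas_get_pd_info() above and most DCMD helpers below share. Fusion
 * adapters with interrupts unmasked wait for the completion interrupt;
 * everything else (MFI series, or interrupts masked) polls the frame.
 *
 *	if ((instance->adapter_type != MFI_SERIES) && !instance->mask_interrupts)
 *		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	else
 *		ret = megasas_issue_polled(instance, cmd);
 */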
  3928. /*
  3929. * megasas_get_pd_list_info - Returns FW's pd_list structure
  3930. * @instance: Adapter soft state
  3931. * @pd_list: pd_list structure
  3932. *
  3933. * Issues an internal command (DCMD) to get the FW's controller PD
3934. * list structure. This information is mainly used to find out the system
3935. * physical drives (system PDs) exposed by the FW.
  3936. */
  3937. static int
  3938. megasas_get_pd_list(struct megasas_instance *instance)
  3939. {
  3940. int ret = 0, pd_index = 0;
  3941. struct megasas_cmd *cmd;
  3942. struct megasas_dcmd_frame *dcmd;
  3943. struct MR_PD_LIST *ci;
  3944. struct MR_PD_ADDRESS *pd_addr;
  3945. if (instance->pd_list_not_supported) {
  3946. dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
  3947. "not supported by firmware\n");
  3948. return ret;
  3949. }
  3950. ci = instance->pd_list_buf;
  3951. cmd = megasas_get_cmd(instance);
  3952. if (!cmd) {
  3953. dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
  3954. return -ENOMEM;
  3955. }
  3956. dcmd = &cmd->frame->dcmd;
  3957. memset(ci, 0, sizeof(*ci));
  3958. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  3959. dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
  3960. dcmd->mbox.b[1] = 0;
  3961. dcmd->cmd = MFI_CMD_DCMD;
  3962. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  3963. dcmd->sge_count = 1;
  3964. dcmd->flags = MFI_FRAME_DIR_READ;
  3965. dcmd->timeout = 0;
  3966. dcmd->pad_0 = 0;
  3967. dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
  3968. dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
  3969. megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
  3970. (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
  3971. if ((instance->adapter_type != MFI_SERIES) &&
  3972. !instance->mask_interrupts)
  3973. ret = megasas_issue_blocked_cmd(instance, cmd,
  3974. MFI_IO_TIMEOUT_SECS);
  3975. else
  3976. ret = megasas_issue_polled(instance, cmd);
  3977. switch (ret) {
  3978. case DCMD_FAILED:
  3979. dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
  3980. "failed/not supported by firmware\n");
  3981. if (instance->adapter_type != MFI_SERIES)
  3982. megaraid_sas_kill_hba(instance);
  3983. else
  3984. instance->pd_list_not_supported = 1;
  3985. break;
  3986. case DCMD_TIMEOUT:
  3987. switch (dcmd_timeout_ocr_possible(instance)) {
  3988. case INITIATE_OCR:
  3989. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  3990. /*
3991. * DCMD timed out on the AEN path.
3992. * The AEN path already holds reset_mutex to avoid PCI access
3993. * while OCR is in progress, so drop it around the reset.
  3994. */
  3995. mutex_unlock(&instance->reset_mutex);
  3996. megasas_reset_fusion(instance->host,
  3997. MFI_IO_TIMEOUT_OCR);
  3998. mutex_lock(&instance->reset_mutex);
  3999. break;
  4000. case KILL_ADAPTER:
  4001. megaraid_sas_kill_hba(instance);
  4002. break;
  4003. case IGNORE_TIMEOUT:
4004. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  4005. __func__, __LINE__);
  4006. break;
  4007. }
  4008. break;
  4009. case DCMD_SUCCESS:
  4010. pd_addr = ci->addr;
  4011. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4012. dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
  4013. __func__, le32_to_cpu(ci->count));
  4014. if ((le32_to_cpu(ci->count) >
  4015. (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
  4016. break;
  4017. memset(instance->local_pd_list, 0,
  4018. MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
  4019. for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
  4020. instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
  4021. le16_to_cpu(pd_addr->deviceId);
  4022. instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
  4023. pd_addr->scsiDevType;
  4024. instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
  4025. MR_PD_STATE_SYSTEM;
  4026. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4027. dev_info(&instance->pdev->dev,
  4028. "PD%d: targetID: 0x%03x deviceType:0x%x\n",
  4029. pd_index, le16_to_cpu(pd_addr->deviceId),
  4030. pd_addr->scsiDevType);
  4031. pd_addr++;
  4032. }
  4033. memcpy(instance->pd_list, instance->local_pd_list,
  4034. sizeof(instance->pd_list));
  4035. break;
  4036. }
  4037. if (ret != DCMD_TIMEOUT)
  4038. megasas_return_cmd(instance, cmd);
  4039. return ret;
  4040. }
  4041. /*
4042. * megasas_get_ld_list - Returns FW's ld_list structure
4043. * @instance: Adapter soft state
4044. * @ld_list: ld_list structure
4045. *
4046. * Issues an internal command (DCMD) to get the FW's logical drive (LD)
4047. * list structure. This information is mainly used to find out which LDs
4048. * are exposed by the FW.
  4049. */
  4050. static int
  4051. megasas_get_ld_list(struct megasas_instance *instance)
  4052. {
  4053. int ret = 0, ld_index = 0, ids = 0;
  4054. struct megasas_cmd *cmd;
  4055. struct megasas_dcmd_frame *dcmd;
  4056. struct MR_LD_LIST *ci;
  4057. dma_addr_t ci_h = 0;
  4058. u32 ld_count;
  4059. ci = instance->ld_list_buf;
  4060. ci_h = instance->ld_list_buf_h;
  4061. cmd = megasas_get_cmd(instance);
  4062. if (!cmd) {
  4063. dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
  4064. return -ENOMEM;
  4065. }
  4066. dcmd = &cmd->frame->dcmd;
  4067. memset(ci, 0, sizeof(*ci));
  4068. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  4069. if (instance->supportmax256vd)
  4070. dcmd->mbox.b[0] = 1;
  4071. dcmd->cmd = MFI_CMD_DCMD;
  4072. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  4073. dcmd->sge_count = 1;
  4074. dcmd->flags = MFI_FRAME_DIR_READ;
  4075. dcmd->timeout = 0;
  4076. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
  4077. dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
  4078. dcmd->pad_0 = 0;
  4079. megasas_set_dma_settings(instance, dcmd, ci_h,
  4080. sizeof(struct MR_LD_LIST));
  4081. if ((instance->adapter_type != MFI_SERIES) &&
  4082. !instance->mask_interrupts)
  4083. ret = megasas_issue_blocked_cmd(instance, cmd,
  4084. MFI_IO_TIMEOUT_SECS);
  4085. else
  4086. ret = megasas_issue_polled(instance, cmd);
  4087. ld_count = le32_to_cpu(ci->ldCount);
  4088. switch (ret) {
  4089. case DCMD_FAILED:
  4090. megaraid_sas_kill_hba(instance);
  4091. break;
  4092. case DCMD_TIMEOUT:
  4093. switch (dcmd_timeout_ocr_possible(instance)) {
  4094. case INITIATE_OCR:
  4095. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4096. /*
4097. * DCMD timed out on the AEN path.
4098. * The AEN path already holds reset_mutex to avoid PCI access
4099. * while OCR is in progress, so drop it around the reset.
  4100. */
  4101. mutex_unlock(&instance->reset_mutex);
  4102. megasas_reset_fusion(instance->host,
  4103. MFI_IO_TIMEOUT_OCR);
  4104. mutex_lock(&instance->reset_mutex);
  4105. break;
  4106. case KILL_ADAPTER:
  4107. megaraid_sas_kill_hba(instance);
  4108. break;
  4109. case IGNORE_TIMEOUT:
  4110. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  4111. __func__, __LINE__);
  4112. break;
  4113. }
  4114. break;
  4115. case DCMD_SUCCESS:
  4116. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4117. dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
  4118. __func__, ld_count);
  4119. if (ld_count > instance->fw_supported_vd_count)
  4120. break;
  4121. memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
  4122. for (ld_index = 0; ld_index < ld_count; ld_index++) {
  4123. if (ci->ldList[ld_index].state != 0) {
  4124. ids = ci->ldList[ld_index].ref.targetId;
  4125. instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
  4126. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4127. dev_info(&instance->pdev->dev,
  4128. "LD%d: targetID: 0x%03x\n",
  4129. ld_index, ids);
  4130. }
  4131. }
  4132. break;
  4133. }
  4134. if (ret != DCMD_TIMEOUT)
  4135. megasas_return_cmd(instance, cmd);
  4136. return ret;
  4137. }
  4138. /**
  4139. * megasas_ld_list_query - Returns FW's ld_list structure
  4140. * @instance: Adapter soft state
  4141. * @query_type: ld_list structure type
  4142. *
4143. * Issues an internal command (DCMD) to query the FW's LD list by the given
4144. * query type. This information is mainly used to find out which LDs are
4145. * exposed by the FW.
  4146. */
  4147. static int
  4148. megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
  4149. {
  4150. int ret = 0, ld_index = 0, ids = 0;
  4151. struct megasas_cmd *cmd;
  4152. struct megasas_dcmd_frame *dcmd;
  4153. struct MR_LD_TARGETID_LIST *ci;
  4154. dma_addr_t ci_h = 0;
  4155. u32 tgtid_count;
  4156. ci = instance->ld_targetid_list_buf;
  4157. ci_h = instance->ld_targetid_list_buf_h;
  4158. cmd = megasas_get_cmd(instance);
  4159. if (!cmd) {
  4160. dev_warn(&instance->pdev->dev,
  4161. "megasas_ld_list_query: Failed to get cmd\n");
  4162. return -ENOMEM;
  4163. }
  4164. dcmd = &cmd->frame->dcmd;
  4165. memset(ci, 0, sizeof(*ci));
  4166. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  4167. dcmd->mbox.b[0] = query_type;
  4168. if (instance->supportmax256vd)
  4169. dcmd->mbox.b[2] = 1;
  4170. dcmd->cmd = MFI_CMD_DCMD;
  4171. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  4172. dcmd->sge_count = 1;
  4173. dcmd->flags = MFI_FRAME_DIR_READ;
  4174. dcmd->timeout = 0;
  4175. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
  4176. dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
  4177. dcmd->pad_0 = 0;
  4178. megasas_set_dma_settings(instance, dcmd, ci_h,
  4179. sizeof(struct MR_LD_TARGETID_LIST));
  4180. if ((instance->adapter_type != MFI_SERIES) &&
  4181. !instance->mask_interrupts)
  4182. ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
  4183. else
  4184. ret = megasas_issue_polled(instance, cmd);
  4185. switch (ret) {
  4186. case DCMD_FAILED:
  4187. dev_info(&instance->pdev->dev,
  4188. "DCMD not supported by firmware - %s %d\n",
  4189. __func__, __LINE__);
  4190. ret = megasas_get_ld_list(instance);
  4191. break;
  4192. case DCMD_TIMEOUT:
  4193. switch (dcmd_timeout_ocr_possible(instance)) {
  4194. case INITIATE_OCR:
  4195. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4196. /*
4197. * DCMD timed out on the AEN path.
4198. * The AEN path already holds reset_mutex to avoid PCI access
4199. * while OCR is in progress, so drop it around the reset.
  4200. */
  4201. mutex_unlock(&instance->reset_mutex);
  4202. megasas_reset_fusion(instance->host,
  4203. MFI_IO_TIMEOUT_OCR);
  4204. mutex_lock(&instance->reset_mutex);
  4205. break;
  4206. case KILL_ADAPTER:
  4207. megaraid_sas_kill_hba(instance);
  4208. break;
  4209. case IGNORE_TIMEOUT:
  4210. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  4211. __func__, __LINE__);
  4212. break;
  4213. }
  4214. break;
  4215. case DCMD_SUCCESS:
  4216. tgtid_count = le32_to_cpu(ci->count);
  4217. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4218. dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
  4219. __func__, tgtid_count);
  4220. if ((tgtid_count > (instance->fw_supported_vd_count)))
  4221. break;
  4222. memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
  4223. for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
  4224. ids = ci->targetId[ld_index];
  4225. instance->ld_ids[ids] = ci->targetId[ld_index];
  4226. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4227. dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
  4228. ld_index, ci->targetId[ld_index]);
  4229. }
  4230. break;
  4231. }
  4232. if (ret != DCMD_TIMEOUT)
  4233. megasas_return_cmd(instance, cmd);
  4234. return ret;
  4235. }
  4236. /**
  4237. * megasas_host_device_list_query
  4238. * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
  4239. * dcmd.mbox - reserved
  4240. * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
  4241. * Desc: This DCMD will return the combined device list
  4242. * Status: MFI_STAT_OK - List returned successfully
  4243. * MFI_STAT_INVALID_CMD - Firmware support for the feature has been
  4244. * disabled
  4245. * @instance: Adapter soft state
  4246. * @is_probe: Driver probe check
  4247. * Return: 0 if DCMD succeeded
  4248. * non-zero if failed
  4249. */
  4250. static int
  4251. megasas_host_device_list_query(struct megasas_instance *instance,
  4252. bool is_probe)
  4253. {
  4254. int ret, i, target_id;
  4255. struct megasas_cmd *cmd;
  4256. struct megasas_dcmd_frame *dcmd;
  4257. struct MR_HOST_DEVICE_LIST *ci;
  4258. u32 count;
  4259. dma_addr_t ci_h;
  4260. ci = instance->host_device_list_buf;
  4261. ci_h = instance->host_device_list_buf_h;
  4262. cmd = megasas_get_cmd(instance);
  4263. if (!cmd) {
  4264. dev_warn(&instance->pdev->dev,
  4265. "%s: failed to get cmd\n",
  4266. __func__);
  4267. return -ENOMEM;
  4268. }
  4269. dcmd = &cmd->frame->dcmd;
  4270. memset(ci, 0, sizeof(*ci));
  4271. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  4272. dcmd->mbox.b[0] = is_probe ? 0 : 1;
  4273. dcmd->cmd = MFI_CMD_DCMD;
  4274. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  4275. dcmd->sge_count = 1;
  4276. dcmd->flags = MFI_FRAME_DIR_READ;
  4277. dcmd->timeout = 0;
  4278. dcmd->pad_0 = 0;
  4279. dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
  4280. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
  4281. megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
  4282. if (!instance->mask_interrupts) {
  4283. ret = megasas_issue_blocked_cmd(instance, cmd,
  4284. MFI_IO_TIMEOUT_SECS);
  4285. } else {
  4286. ret = megasas_issue_polled(instance, cmd);
  4287. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4288. }
  4289. switch (ret) {
  4290. case DCMD_SUCCESS:
  4291. /* Fill the internal pd_list and ld_ids array based on
  4292. * targetIds returned by FW
  4293. */
  4294. count = le32_to_cpu(ci->count);
  4295. if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
  4296. break;
  4297. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4298. dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
  4299. __func__, count);
  4300. memset(instance->local_pd_list, 0,
  4301. MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
  4302. memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
  4303. for (i = 0; i < count; i++) {
  4304. target_id = le16_to_cpu(ci->host_device_list[i].target_id);
  4305. if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
  4306. instance->local_pd_list[target_id].tid = target_id;
  4307. instance->local_pd_list[target_id].driveType =
  4308. ci->host_device_list[i].scsi_type;
  4309. instance->local_pd_list[target_id].driveState =
  4310. MR_PD_STATE_SYSTEM;
  4311. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4312. dev_info(&instance->pdev->dev,
  4313. "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
  4314. i, target_id, ci->host_device_list[i].scsi_type);
  4315. } else {
  4316. instance->ld_ids[target_id] = target_id;
  4317. if (megasas_dbg_lvl & LD_PD_DEBUG)
  4318. dev_info(&instance->pdev->dev,
  4319. "Device %d: LD targetID: 0x%03x\n",
  4320. i, target_id);
  4321. }
  4322. }
  4323. memcpy(instance->pd_list, instance->local_pd_list,
  4324. sizeof(instance->pd_list));
  4325. break;
  4326. case DCMD_TIMEOUT:
  4327. switch (dcmd_timeout_ocr_possible(instance)) {
  4328. case INITIATE_OCR:
  4329. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4330. mutex_unlock(&instance->reset_mutex);
  4331. megasas_reset_fusion(instance->host,
  4332. MFI_IO_TIMEOUT_OCR);
  4333. mutex_lock(&instance->reset_mutex);
  4334. break;
  4335. case KILL_ADAPTER:
  4336. megaraid_sas_kill_hba(instance);
  4337. break;
  4338. case IGNORE_TIMEOUT:
  4339. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  4340. __func__, __LINE__);
  4341. break;
  4342. }
  4343. break;
  4344. case DCMD_FAILED:
  4345. dev_err(&instance->pdev->dev,
  4346. "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
  4347. __func__);
  4348. break;
  4349. }
  4350. if (ret != DCMD_TIMEOUT)
  4351. megasas_return_cmd(instance, cmd);
  4352. return ret;
  4353. }
  4354. /*
  4355. * megasas_update_ext_vd_details : Update details w.r.t Extended VD
  4356. * instance : Controller's instance
  4357. */
  4358. static void megasas_update_ext_vd_details(struct megasas_instance *instance)
  4359. {
  4360. struct fusion_context *fusion;
  4361. u32 ventura_map_sz = 0;
  4362. fusion = instance->ctrl_context;
  4363. /* For MFI based controllers return dummy success */
  4364. if (!fusion)
  4365. return;
  4366. instance->supportmax256vd =
  4367. instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4368. /* Below is an additional check to address future FW enhancements */
  4369. if (instance->ctrl_info_buf->max_lds > 64)
  4370. instance->supportmax256vd = 1;
  4371. instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
  4372. * MEGASAS_MAX_DEV_PER_CHANNEL;
  4373. instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
  4374. * MEGASAS_MAX_DEV_PER_CHANNEL;
  4375. if (instance->supportmax256vd) {
  4376. instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
  4377. instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
  4378. } else {
  4379. instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
  4380. instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
  4381. }
  4382. dev_info(&instance->pdev->dev,
  4383. "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
  4384. instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
  4385. instance->ctrl_info_buf->max_lds);
  4386. if (instance->max_raid_mapsize) {
  4387. ventura_map_sz = instance->max_raid_mapsize *
  4388. MR_MIN_MAP_SIZE; /* 64k */
  4389. fusion->current_map_sz = ventura_map_sz;
  4390. fusion->max_map_sz = ventura_map_sz;
  4391. } else {
  4392. fusion->old_map_sz =
  4393. struct_size((struct MR_FW_RAID_MAP *)0, ldSpanMap,
  4394. instance->fw_supported_vd_count);
  4395. fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
  4396. fusion->max_map_sz =
  4397. max(fusion->old_map_sz, fusion->new_map_sz);
  4398. if (instance->supportmax256vd)
  4399. fusion->current_map_sz = fusion->new_map_sz;
  4400. else
  4401. fusion->current_map_sz = fusion->old_map_sz;
  4402. }
  4403. /* irrespective of FW raid maps, driver raid map is constant */
  4404. fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
  4405. }
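/*
 * Illustrative sketch, not part of the driver: the Ventura map sizing above,
 * worked through with a hypothetical value. MR_MIN_MAP_SIZE is 64 KB (see the
 * comment in the function), so a FW reporting max_raid_mapsize = 4 would get:
 *
 *	ventura_map_sz = 4 * MR_MIN_MAP_SIZE;	256 KB
 *	fusion->current_map_sz = ventura_map_sz;
 *	fusion->max_map_sz = ventura_map_sz;
 */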
  4406. /*
  4407. * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
  4408. * dcmd.hdr.length - number of bytes to read
  4409. * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
  4410. * Desc: Fill in snapdump properties
  4411. * Status: MFI_STAT_OK- Command successful
  4412. */
  4413. void megasas_get_snapdump_properties(struct megasas_instance *instance)
  4414. {
  4415. int ret = 0;
  4416. struct megasas_cmd *cmd;
  4417. struct megasas_dcmd_frame *dcmd;
  4418. struct MR_SNAPDUMP_PROPERTIES *ci;
  4419. dma_addr_t ci_h = 0;
  4420. ci = instance->snapdump_prop;
  4421. ci_h = instance->snapdump_prop_h;
  4422. if (!ci)
  4423. return;
  4424. cmd = megasas_get_cmd(instance);
  4425. if (!cmd) {
  4426. dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
  4427. return;
  4428. }
  4429. dcmd = &cmd->frame->dcmd;
  4430. memset(ci, 0, sizeof(*ci));
  4431. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  4432. dcmd->cmd = MFI_CMD_DCMD;
  4433. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  4434. dcmd->sge_count = 1;
  4435. dcmd->flags = MFI_FRAME_DIR_READ;
  4436. dcmd->timeout = 0;
  4437. dcmd->pad_0 = 0;
  4438. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
  4439. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
  4440. megasas_set_dma_settings(instance, dcmd, ci_h,
  4441. sizeof(struct MR_SNAPDUMP_PROPERTIES));
  4442. if (!instance->mask_interrupts) {
  4443. ret = megasas_issue_blocked_cmd(instance, cmd,
  4444. MFI_IO_TIMEOUT_SECS);
  4445. } else {
  4446. ret = megasas_issue_polled(instance, cmd);
  4447. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4448. }
  4449. switch (ret) {
  4450. case DCMD_SUCCESS:
  4451. instance->snapdump_wait_time =
  4452. min_t(u8, ci->trigger_min_num_sec_before_ocr,
  4453. MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
  4454. break;
  4455. case DCMD_TIMEOUT:
  4456. switch (dcmd_timeout_ocr_possible(instance)) {
  4457. case INITIATE_OCR:
  4458. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4459. mutex_unlock(&instance->reset_mutex);
  4460. megasas_reset_fusion(instance->host,
  4461. MFI_IO_TIMEOUT_OCR);
  4462. mutex_lock(&instance->reset_mutex);
  4463. break;
  4464. case KILL_ADAPTER:
  4465. megaraid_sas_kill_hba(instance);
  4466. break;
  4467. case IGNORE_TIMEOUT:
  4468. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  4469. __func__, __LINE__);
  4470. break;
  4471. }
  4472. }
  4473. if (ret != DCMD_TIMEOUT)
  4474. megasas_return_cmd(instance, cmd);
  4475. }
  4476. /**
  4477. * megasas_get_ctrl_info - Returns FW's controller structure
  4478. * @instance: Adapter soft state
  4479. *
  4480. * Issues an internal command (DCMD) to get the FW's controller structure.
  4481. * This information is mainly used to find out the maximum IO transfer per
  4482. * command supported by the FW.
  4483. */
  4484. int
  4485. megasas_get_ctrl_info(struct megasas_instance *instance)
  4486. {
  4487. int ret = 0;
  4488. struct megasas_cmd *cmd;
  4489. struct megasas_dcmd_frame *dcmd;
  4490. struct megasas_ctrl_info *ci;
  4491. dma_addr_t ci_h = 0;
  4492. ci = instance->ctrl_info_buf;
  4493. ci_h = instance->ctrl_info_buf_h;
  4494. cmd = megasas_get_cmd(instance);
  4495. if (!cmd) {
  4496. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
  4497. return -ENOMEM;
  4498. }
  4499. dcmd = &cmd->frame->dcmd;
  4500. memset(ci, 0, sizeof(*ci));
  4501. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  4502. dcmd->cmd = MFI_CMD_DCMD;
  4503. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  4504. dcmd->sge_count = 1;
  4505. dcmd->flags = MFI_FRAME_DIR_READ;
  4506. dcmd->timeout = 0;
  4507. dcmd->pad_0 = 0;
  4508. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
  4509. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
  4510. dcmd->mbox.b[0] = 1;
  4511. megasas_set_dma_settings(instance, dcmd, ci_h,
  4512. sizeof(struct megasas_ctrl_info));
  4513. if ((instance->adapter_type != MFI_SERIES) &&
  4514. !instance->mask_interrupts) {
  4515. ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
  4516. } else {
  4517. ret = megasas_issue_polled(instance, cmd);
  4518. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4519. }
  4520. switch (ret) {
  4521. case DCMD_SUCCESS:
  4522. /* Save required controller information in
  4523. * CPU endianness format.
  4524. */
  4525. le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
  4526. le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
  4527. le32_to_cpus((u32 *)&ci->adapterOperations2);
  4528. le32_to_cpus((u32 *)&ci->adapterOperations3);
  4529. le16_to_cpus((u16 *)&ci->adapter_operations4);
  4530. le32_to_cpus((u32 *)&ci->adapter_operations5);
  4531. /* Update the latest Ext VD info.
  4532. * From Init path, store current firmware details.
4533. * From OCR path, detect any firmware property changes
4534. * in case of a firmware upgrade without a system reboot.
  4535. */
  4536. megasas_update_ext_vd_details(instance);
  4537. instance->support_seqnum_jbod_fp =
  4538. ci->adapterOperations3.useSeqNumJbodFP;
  4539. instance->support_morethan256jbod =
  4540. ci->adapter_operations4.support_pd_map_target_id;
  4541. instance->support_nvme_passthru =
  4542. ci->adapter_operations4.support_nvme_passthru;
  4543. instance->support_pci_lane_margining =
  4544. ci->adapter_operations5.support_pci_lane_margining;
  4545. instance->task_abort_tmo = ci->TaskAbortTO;
  4546. instance->max_reset_tmo = ci->MaxResetTO;
4547. /* Check whether controller is iMR or MR */
  4548. instance->is_imr = (ci->memory_size ? 0 : 1);
  4549. instance->snapdump_wait_time =
  4550. (ci->properties.on_off_properties2.enable_snap_dump ?
  4551. MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
  4552. instance->enable_fw_dev_list =
  4553. ci->properties.on_off_properties2.enable_fw_dev_list;
  4554. dev_info(&instance->pdev->dev,
  4555. "controller type\t: %s(%dMB)\n",
  4556. instance->is_imr ? "iMR" : "MR",
  4557. le16_to_cpu(ci->memory_size));
  4558. instance->disableOnlineCtrlReset =
  4559. ci->properties.OnOffProperties.disableOnlineCtrlReset;
  4560. instance->secure_jbod_support =
  4561. ci->adapterOperations3.supportSecurityonJBOD;
  4562. dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
  4563. instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
  4564. dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
  4565. instance->secure_jbod_support ? "Yes" : "No");
  4566. dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
  4567. instance->support_nvme_passthru ? "Yes" : "No");
  4568. dev_info(&instance->pdev->dev,
  4569. "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
  4570. instance->task_abort_tmo, instance->max_reset_tmo);
  4571. dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
  4572. instance->support_seqnum_jbod_fp ? "Yes" : "No");
  4573. dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
  4574. instance->support_pci_lane_margining ? "Yes" : "No");
  4575. break;
  4576. case DCMD_TIMEOUT:
  4577. switch (dcmd_timeout_ocr_possible(instance)) {
  4578. case INITIATE_OCR:
  4579. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4580. mutex_unlock(&instance->reset_mutex);
  4581. megasas_reset_fusion(instance->host,
  4582. MFI_IO_TIMEOUT_OCR);
  4583. mutex_lock(&instance->reset_mutex);
  4584. break;
  4585. case KILL_ADAPTER:
  4586. megaraid_sas_kill_hba(instance);
  4587. break;
  4588. case IGNORE_TIMEOUT:
  4589. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  4590. __func__, __LINE__);
  4591. break;
  4592. }
  4593. break;
  4594. case DCMD_FAILED:
  4595. megaraid_sas_kill_hba(instance);
  4596. break;
  4597. }
  4598. if (ret != DCMD_TIMEOUT)
  4599. megasas_return_cmd(instance, cmd);
  4600. return ret;
  4601. }
  4602. /*
  4603. * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
  4604. * to firmware
  4605. *
  4606. * @instance: Adapter soft state
4607. * @crash_buf_state: tell FW to turn ON/OFF the crash dump feature
4608. *                   MR_CRASH_BUF_TURN_OFF = 0
4609. *                   MR_CRASH_BUF_TURN_ON = 1
4610. * @return 0 on success, non-zero on failure.
  4611. * Issues an internal command (DCMD) to set parameters for crash dump feature.
  4612. * Driver will send address of crash dump DMA buffer and set mbox to tell FW
  4613. * that driver supports crash dump feature. This DCMD will be sent only if
  4614. * crash dump feature is supported by the FW.
  4615. *
  4616. */
  4617. int megasas_set_crash_dump_params(struct megasas_instance *instance,
  4618. u8 crash_buf_state)
  4619. {
  4620. int ret = 0;
  4621. struct megasas_cmd *cmd;
  4622. struct megasas_dcmd_frame *dcmd;
  4623. cmd = megasas_get_cmd(instance);
  4624. if (!cmd) {
  4625. dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
  4626. return -ENOMEM;
  4627. }
  4628. dcmd = &cmd->frame->dcmd;
  4629. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  4630. dcmd->mbox.b[0] = crash_buf_state;
  4631. dcmd->cmd = MFI_CMD_DCMD;
  4632. dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
  4633. dcmd->sge_count = 1;
  4634. dcmd->flags = MFI_FRAME_DIR_NONE;
  4635. dcmd->timeout = 0;
  4636. dcmd->pad_0 = 0;
  4637. dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
  4638. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
  4639. megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
  4640. CRASH_DMA_BUF_SIZE);
  4641. if ((instance->adapter_type != MFI_SERIES) &&
  4642. !instance->mask_interrupts)
  4643. ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
  4644. else
  4645. ret = megasas_issue_polled(instance, cmd);
  4646. if (ret == DCMD_TIMEOUT) {
  4647. switch (dcmd_timeout_ocr_possible(instance)) {
  4648. case INITIATE_OCR:
  4649. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  4650. megasas_reset_fusion(instance->host,
  4651. MFI_IO_TIMEOUT_OCR);
  4652. break;
  4653. case KILL_ADAPTER:
  4654. megaraid_sas_kill_hba(instance);
  4655. break;
  4656. case IGNORE_TIMEOUT:
  4657. dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
  4658. __func__, __LINE__);
  4659. break;
  4660. }
  4661. } else
  4662. megasas_return_cmd(instance, cmd);
  4663. return ret;
  4664. }
  4665. /**
  4666. * megasas_issue_init_mfi - Initializes the FW
  4667. * @instance: Adapter soft state
  4668. *
  4669. * Issues the INIT MFI cmd
  4670. */
  4671. static int
  4672. megasas_issue_init_mfi(struct megasas_instance *instance)
  4673. {
  4674. __le32 context;
  4675. struct megasas_cmd *cmd;
  4676. struct megasas_init_frame *init_frame;
  4677. struct megasas_init_queue_info *initq_info;
  4678. dma_addr_t init_frame_h;
  4679. dma_addr_t initq_info_h;
  4680. /*
4681. * Prepare an init frame. Note that the init frame points to the queue info
4682. * structure. Each frame has its SGL allocated after the first 64 bytes. For
4683. * this frame - since we don't need any SGL - we use the SGL's space as the
4684. * queue info structure.
  4685. *
  4686. * We will not get a NULL command below. We just created the pool.
  4687. */
  4688. cmd = megasas_get_cmd(instance);
  4689. init_frame = (struct megasas_init_frame *)cmd->frame;
  4690. initq_info = (struct megasas_init_queue_info *)
  4691. ((unsigned long)init_frame + 64);
  4692. init_frame_h = cmd->frame_phys_addr;
  4693. initq_info_h = init_frame_h + 64;
  4694. context = init_frame->context;
  4695. memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
  4696. memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
  4697. init_frame->context = context;
  4698. initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
  4699. initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
  4700. initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
  4701. initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
  4702. init_frame->cmd = MFI_CMD_INIT;
  4703. init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
  4704. init_frame->queue_info_new_phys_addr_lo =
  4705. cpu_to_le32(lower_32_bits(initq_info_h));
  4706. init_frame->queue_info_new_phys_addr_hi =
  4707. cpu_to_le32(upper_32_bits(initq_info_h));
  4708. init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
  4709. /*
  4710. * disable the intr before firing the init frame to FW
  4711. */
  4712. instance->instancet->disable_intr(instance);
  4713. /*
  4714. * Issue the init frame in polled mode
  4715. */
  4716. if (megasas_issue_polled(instance, cmd)) {
  4717. dev_err(&instance->pdev->dev, "Failed to init firmware\n");
  4718. megasas_return_cmd(instance, cmd);
  4719. goto fail_fw_init;
  4720. }
  4721. megasas_return_cmd(instance, cmd);
  4722. return 0;
  4723. fail_fw_init:
  4724. return -EINVAL;
  4725. }
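/*
 * Illustrative sketch, not part of the driver: the layout of the single MFI
 * frame reused by megasas_issue_init_mfi() above.
 *
 *	cmd->frame_phys_addr (init_frame_h)
 *	+-----------------------------+  offset 0
 *	| struct megasas_init_frame   |  first 64 bytes of the frame
 *	+-----------------------------+  offset 64 (initq_info_h)
 *	| megasas_init_queue_info     |  placed in the unused SGL space
 *	+-----------------------------+
 */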
  4726. static u32
  4727. megasas_init_adapter_mfi(struct megasas_instance *instance)
  4728. {
  4729. u32 context_sz;
  4730. u32 reply_q_sz;
  4731. /*
  4732. * Get various operational parameters from status register
  4733. */
  4734. instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
  4735. /*
  4736. * Reduce the max supported cmds by 1. This is to ensure that the
  4737. * reply_q_sz (1 more than the max cmd that driver may send)
  4738. * does not exceed max cmds that the FW can support
  4739. */
  4740. instance->max_fw_cmds = instance->max_fw_cmds-1;
  4741. instance->max_mfi_cmds = instance->max_fw_cmds;
  4742. instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
  4743. 0x10;
  4744. /*
  4745. * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
  4746. * are reserved for IOCTL + driver's internal DCMDs.
  4747. */
  4748. if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
  4749. (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
  4750. instance->max_scsi_cmds = (instance->max_fw_cmds -
  4751. MEGASAS_SKINNY_INT_CMDS);
  4752. sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
  4753. } else {
  4754. instance->max_scsi_cmds = (instance->max_fw_cmds -
  4755. MEGASAS_INT_CMDS);
  4756. sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
  4757. }
  4758. instance->cur_can_queue = instance->max_scsi_cmds;
  4759. /*
  4760. * Create a pool of commands
  4761. */
  4762. if (megasas_alloc_cmds(instance))
  4763. goto fail_alloc_cmds;
  4764. /*
  4765. * Allocate memory for reply queue. Length of reply queue should
  4766. * be _one_ more than the maximum commands handled by the firmware.
  4767. *
4768. * Note: When FW completes commands, it places the corresponding context
  4769. * values in this circular reply queue. This circular queue is a fairly
  4770. * typical producer-consumer queue. FW is the producer (of completed
  4771. * commands) and the driver is the consumer.
  4772. */
  4773. context_sz = sizeof(u32);
  4774. reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
  4775. instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
  4776. reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
  4777. if (!instance->reply_queue) {
  4778. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
  4779. goto fail_reply_queue;
  4780. }
  4781. if (megasas_issue_init_mfi(instance))
  4782. goto fail_fw_init;
  4783. if (megasas_get_ctrl_info(instance)) {
4784. dev_err(&instance->pdev->dev, "(%d): Could not get controller info, "
4785. "failing from %s %d\n", instance->unique_id,
  4786. __func__, __LINE__);
  4787. goto fail_fw_init;
  4788. }
  4789. instance->fw_support_ieee = 0;
  4790. instance->fw_support_ieee =
  4791. (instance->instancet->read_fw_status_reg(instance) &
  4792. 0x04000000);
  4793. dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
  4794. instance->fw_support_ieee);
  4795. if (instance->fw_support_ieee)
  4796. instance->flag_ieee = 1;
  4797. return 0;
  4798. fail_fw_init:
  4799. dma_free_coherent(&instance->pdev->dev, reply_q_sz,
  4800. instance->reply_queue, instance->reply_queue_h);
  4801. fail_reply_queue:
  4802. megasas_free_cmds(instance);
  4803. fail_alloc_cmds:
  4804. return 1;
  4805. }
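/*
 * Illustrative sketch, not part of the driver: the reply queue sizing above,
 * worked through with a hypothetical FW value. If the status register
 * reported 1008 supported commands, the driver would end up with:
 *
 *	max_fw_cmds = 1008 - 1;				1007
 *	reply_q_sz  = sizeof(u32) * (max_fw_cmds + 1);	4 * 1008 bytes
 */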
  4806. static
  4807. void megasas_setup_irq_poll(struct megasas_instance *instance)
  4808. {
  4809. struct megasas_irq_context *irq_ctx;
  4810. u32 count, i;
  4811. count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
  4812. /* Initialize IRQ poll */
  4813. for (i = 0; i < count; i++) {
  4814. irq_ctx = &instance->irq_context[i];
  4815. irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
  4816. irq_ctx->irq_poll_scheduled = false;
  4817. irq_poll_init(&irq_ctx->irqpoll,
  4818. instance->threshold_reply_count,
  4819. megasas_irqpoll);
  4820. }
  4821. }
  4822. /*
  4823. * megasas_setup_irqs_ioapic - register legacy interrupts.
  4824. * @instance: Adapter soft state
  4825. *
4826. * Do not enable interrupts; only set up the ISRs.
  4827. *
  4828. * Return 0 on success.
  4829. */
  4830. static int
  4831. megasas_setup_irqs_ioapic(struct megasas_instance *instance)
  4832. {
  4833. struct pci_dev *pdev;
  4834. pdev = instance->pdev;
  4835. instance->irq_context[0].instance = instance;
  4836. instance->irq_context[0].MSIxIndex = 0;
  4837. snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
  4838. "megasas", instance->host->host_no);
  4839. if (request_irq(pci_irq_vector(pdev, 0),
  4840. instance->instancet->service_isr, IRQF_SHARED,
  4841. instance->irq_context->name, &instance->irq_context[0])) {
  4842. dev_err(&instance->pdev->dev,
  4843. "Failed to register IRQ from %s %d\n",
  4844. __func__, __LINE__);
  4845. return -1;
  4846. }
  4847. instance->perf_mode = MR_LATENCY_PERF_MODE;
  4848. instance->low_latency_index_start = 0;
  4849. return 0;
  4850. }
  4851. /**
  4852. * megasas_setup_irqs_msix - register MSI-x interrupts.
  4853. * @instance: Adapter soft state
  4854. * @is_probe: Driver probe check
  4855. *
4856. * Do not enable interrupts; only set up the ISRs.
  4857. *
  4858. * Return 0 on success.
  4859. */
  4860. static int
  4861. megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
  4862. {
  4863. int i, j;
  4864. struct pci_dev *pdev;
  4865. pdev = instance->pdev;
  4866. /* Try MSI-x */
  4867. for (i = 0; i < instance->msix_vectors; i++) {
  4868. instance->irq_context[i].instance = instance;
  4869. instance->irq_context[i].MSIxIndex = i;
  4870. snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
  4871. "megasas", instance->host->host_no, i);
  4872. if (request_irq(pci_irq_vector(pdev, i),
  4873. instance->instancet->service_isr, 0, instance->irq_context[i].name,
  4874. &instance->irq_context[i])) {
  4875. dev_err(&instance->pdev->dev,
  4876. "Failed to register IRQ for vector %d.\n", i);
  4877. for (j = 0; j < i; j++) {
  4878. if (j < instance->low_latency_index_start)
  4879. irq_update_affinity_hint(
  4880. pci_irq_vector(pdev, j), NULL);
  4881. free_irq(pci_irq_vector(pdev, j),
  4882. &instance->irq_context[j]);
  4883. }
  4884. /* Retry irq register for IO_APIC*/
  4885. instance->msix_vectors = 0;
  4886. instance->msix_load_balance = false;
  4887. if (is_probe) {
  4888. pci_free_irq_vectors(instance->pdev);
  4889. return megasas_setup_irqs_ioapic(instance);
  4890. } else {
  4891. return -1;
  4892. }
  4893. }
  4894. }
  4895. return 0;
  4896. }
  4897. /*
4898. * megasas_destroy_irqs - unregister interrupts.
  4899. * @instance: Adapter soft state
  4900. * return: void
  4901. */
  4902. static void
  4903. megasas_destroy_irqs(struct megasas_instance *instance) {
  4904. int i;
  4905. int count;
  4906. struct megasas_irq_context *irq_ctx;
  4907. count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
  4908. if (instance->adapter_type != MFI_SERIES) {
  4909. for (i = 0; i < count; i++) {
  4910. irq_ctx = &instance->irq_context[i];
  4911. irq_poll_disable(&irq_ctx->irqpoll);
  4912. }
  4913. }
  4914. if (instance->msix_vectors)
  4915. for (i = 0; i < instance->msix_vectors; i++) {
  4916. if (i < instance->low_latency_index_start)
  4917. irq_update_affinity_hint(
  4918. pci_irq_vector(instance->pdev, i), NULL);
  4919. free_irq(pci_irq_vector(instance->pdev, i),
  4920. &instance->irq_context[i]);
  4921. }
  4922. else
  4923. free_irq(pci_irq_vector(instance->pdev, 0),
  4924. &instance->irq_context[0]);
  4925. }
  4926. /**
  4927. * megasas_setup_jbod_map - setup jbod map for FP seq_number.
  4928. * @instance: Adapter soft state
  4929. *
4930. * On failure, the JBOD sequence-number fastpath is simply disabled.
  4931. */
  4932. void
  4933. megasas_setup_jbod_map(struct megasas_instance *instance)
  4934. {
  4935. int i;
  4936. struct fusion_context *fusion = instance->ctrl_context;
  4937. size_t pd_seq_map_sz;
  4938. pd_seq_map_sz = struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, seq,
  4939. MAX_PHYSICAL_DEVICES);
  4940. instance->use_seqnum_jbod_fp =
  4941. instance->support_seqnum_jbod_fp;
  4942. if (reset_devices || !fusion ||
  4943. !instance->support_seqnum_jbod_fp) {
  4944. dev_info(&instance->pdev->dev,
  4945. "JBOD sequence map is disabled %s %d\n",
  4946. __func__, __LINE__);
  4947. instance->use_seqnum_jbod_fp = false;
  4948. return;
  4949. }
  4950. if (fusion->pd_seq_sync[0])
  4951. goto skip_alloc;
  4952. for (i = 0; i < JBOD_MAPS_COUNT; i++) {
  4953. fusion->pd_seq_sync[i] = dma_alloc_coherent
  4954. (&instance->pdev->dev, pd_seq_map_sz,
  4955. &fusion->pd_seq_phys[i], GFP_KERNEL);
  4956. if (!fusion->pd_seq_sync[i]) {
  4957. dev_err(&instance->pdev->dev,
  4958. "Failed to allocate memory from %s %d\n",
  4959. __func__, __LINE__);
  4960. if (i == 1) {
  4961. dma_free_coherent(&instance->pdev->dev,
  4962. pd_seq_map_sz, fusion->pd_seq_sync[0],
  4963. fusion->pd_seq_phys[0]);
  4964. fusion->pd_seq_sync[0] = NULL;
  4965. }
  4966. instance->use_seqnum_jbod_fp = false;
  4967. return;
  4968. }
  4969. }
  4970. skip_alloc:
  4971. if (!megasas_sync_pd_seq_num(instance, false) &&
  4972. !megasas_sync_pd_seq_num(instance, true))
  4973. instance->use_seqnum_jbod_fp = true;
  4974. else
  4975. instance->use_seqnum_jbod_fp = false;
  4976. }
  4977. static void megasas_setup_reply_map(struct megasas_instance *instance)
  4978. {
  4979. const struct cpumask *mask;
  4980. unsigned int queue, cpu, low_latency_index_start;
  4981. low_latency_index_start = instance->low_latency_index_start;
  4982. for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
  4983. mask = pci_irq_get_affinity(instance->pdev, queue);
  4984. if (!mask)
  4985. goto fallback;
  4986. for_each_cpu(cpu, mask)
  4987. instance->reply_map[cpu] = queue;
  4988. }
  4989. return;
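/*
 * No affinity information was available for one of the vectors: spread
 * all possible CPUs round-robin across the I/O reply queues instead.
 */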
  4990. fallback:
  4991. queue = low_latency_index_start;
  4992. for_each_possible_cpu(cpu) {
  4993. instance->reply_map[cpu] = queue;
  4994. if (queue == (instance->msix_vectors - 1))
  4995. queue = low_latency_index_start;
  4996. else
  4997. queue++;
  4998. }
  4999. }
  5000. /**
  5001. * megasas_get_device_list - Get the PD and LD device list from FW.
  5002. * @instance: Adapter soft state
  5003. * @return: Success or failure
  5004. *
  5005. * Issue DCMDs to Firmware to get the PD and LD list.
  5006. * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
  5007. * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
  5008. */
  5009. static
  5010. int megasas_get_device_list(struct megasas_instance *instance)
  5011. {
  5012. if (instance->enable_fw_dev_list) {
  5013. if (megasas_host_device_list_query(instance, true))
  5014. return FAILED;
  5015. } else {
  5016. if (megasas_get_pd_list(instance) < 0) {
  5017. dev_err(&instance->pdev->dev, "failed to get PD list\n");
  5018. return FAILED;
  5019. }
  5020. if (megasas_ld_list_query(instance,
  5021. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
  5022. dev_err(&instance->pdev->dev, "failed to get LD list\n");
  5023. return FAILED;
  5024. }
  5025. }
  5026. return SUCCESS;
  5027. }
  5028. /**
  5029. * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint
  5030. * for high IOPS queues
  5031. * @instance: Adapter soft state
  5032. * return: void
  5033. */
  5034. static inline void
  5035. megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
  5036. {
  5037. int i;
  5038. unsigned int irq;
  5039. const struct cpumask *mask;
  5040. if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
  5041. mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
  5042. for (i = 0; i < instance->low_latency_index_start; i++) {
  5043. irq = pci_irq_vector(instance->pdev, i);
  5044. irq_set_affinity_and_hint(irq, mask);
  5045. }
  5046. }
  5047. }
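/*
 * __megasas_alloc_irq_vectors - allocate MSI-X vectors, with affinity
 * spreading when SMP affinity is enabled; the first
 * low_latency_index_start vectors are kept as non-managed pre-vectors.
 */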
  5048. static int
  5049. __megasas_alloc_irq_vectors(struct megasas_instance *instance)
  5050. {
  5051. int i, irq_flags;
  5052. struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
  5053. struct irq_affinity *descp = &desc;
  5054. irq_flags = PCI_IRQ_MSIX;
  5055. if (instance->smp_affinity_enable)
  5056. irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
  5057. else
  5058. descp = NULL;
5059. /* Do not allocate MSI-X vectors for poll_queues;
5060. * msix_vectors always stays within the range of FW-supported reply queues.
  5061. */
  5062. i = pci_alloc_irq_vectors_affinity(instance->pdev,
  5063. instance->low_latency_index_start,
  5064. instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp);
  5065. return i;
  5066. }
  5067. /**
  5068. * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors
  5069. * @instance: Adapter soft state
  5070. * return: void
  5071. */
  5072. static void
  5073. megasas_alloc_irq_vectors(struct megasas_instance *instance)
  5074. {
  5075. int i;
  5076. unsigned int num_msix_req;
  5077. instance->iopoll_q_count = 0;
  5078. if ((instance->adapter_type != MFI_SERIES) &&
  5079. poll_queues) {
  5080. instance->perf_mode = MR_LATENCY_PERF_MODE;
  5081. instance->low_latency_index_start = 1;
5082. /* reserve for default and non-managed pre-vector. */
  5083. if (instance->msix_vectors > (poll_queues + 2))
  5084. instance->iopoll_q_count = poll_queues;
  5085. else
  5086. instance->iopoll_q_count = 0;
  5087. num_msix_req = num_online_cpus() + instance->low_latency_index_start;
  5088. instance->msix_vectors = min(num_msix_req,
  5089. instance->msix_vectors);
  5090. }
  5091. i = __megasas_alloc_irq_vectors(instance);
  5092. if (((instance->perf_mode == MR_BALANCED_PERF_MODE)
  5093. || instance->iopoll_q_count) &&
  5094. (i != (instance->msix_vectors - instance->iopoll_q_count))) {
  5095. if (instance->msix_vectors)
  5096. pci_free_irq_vectors(instance->pdev);
  5097. /* Disable Balanced IOPS mode and try realloc vectors */
  5098. instance->perf_mode = MR_LATENCY_PERF_MODE;
  5099. instance->low_latency_index_start = 1;
  5100. num_msix_req = num_online_cpus() + instance->low_latency_index_start;
  5101. instance->msix_vectors = min(num_msix_req,
  5102. instance->msix_vectors);
  5103. instance->iopoll_q_count = 0;
  5104. i = __megasas_alloc_irq_vectors(instance);
  5105. }
  5106. dev_info(&instance->pdev->dev,
  5107. "requested/available msix %d/%d poll_queue %d\n",
  5108. instance->msix_vectors - instance->iopoll_q_count,
  5109. i, instance->iopoll_q_count);
  5110. if (i > 0)
  5111. instance->msix_vectors = i;
  5112. else
  5113. instance->msix_vectors = 0;
  5114. if (instance->smp_affinity_enable)
  5115. megasas_set_high_iops_queue_affinity_and_hint(instance);
  5116. }
  5117. /**
  5118. * megasas_init_fw - Initializes the FW
  5119. * @instance: Adapter soft state
  5120. *
  5121. * This is the main function for initializing firmware
  5122. */
  5123. static int megasas_init_fw(struct megasas_instance *instance)
  5124. {
  5125. u32 max_sectors_1;
  5126. u32 max_sectors_2, tmp_sectors, msix_enable;
  5127. u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
  5128. resource_size_t base_addr;
  5129. void *base_addr_phys;
  5130. struct megasas_ctrl_info *ctrl_info = NULL;
  5131. unsigned long bar_list;
  5132. int i, j, loop;
  5133. struct IOV_111 *iovPtr;
  5134. struct fusion_context *fusion;
  5135. bool intr_coalescing;
  5136. unsigned int num_msix_req;
  5137. u16 lnksta, speed;
  5138. fusion = instance->ctrl_context;
  5139. /* Find first memory bar */
  5140. bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
  5141. instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
  5142. if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
  5143. "megasas: LSI")) {
  5144. dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
  5145. return -EBUSY;
  5146. }
  5147. base_addr = pci_resource_start(instance->pdev, instance->bar);
  5148. instance->reg_set = ioremap(base_addr, 8192);
  5149. if (!instance->reg_set) {
  5150. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
  5151. goto fail_ioremap;
  5152. }
  5153. base_addr_phys = &base_addr;
  5154. dev_printk(KERN_DEBUG, &instance->pdev->dev,
  5155. "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n",
  5156. instance->bar, base_addr_phys, instance->reg_set);
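/*
 * Pick the register access template: fusion adapters share one template,
 * while MFI-series adapters use a per-family template.
 */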
  5157. if (instance->adapter_type != MFI_SERIES)
  5158. instance->instancet = &megasas_instance_template_fusion;
  5159. else {
  5160. switch (instance->pdev->device) {
  5161. case PCI_DEVICE_ID_LSI_SAS1078R:
  5162. case PCI_DEVICE_ID_LSI_SAS1078DE:
  5163. instance->instancet = &megasas_instance_template_ppc;
  5164. break;
  5165. case PCI_DEVICE_ID_LSI_SAS1078GEN2:
  5166. case PCI_DEVICE_ID_LSI_SAS0079GEN2:
  5167. instance->instancet = &megasas_instance_template_gen2;
  5168. break;
  5169. case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
  5170. case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
  5171. instance->instancet = &megasas_instance_template_skinny;
  5172. break;
  5173. case PCI_DEVICE_ID_LSI_SAS1064R:
  5174. case PCI_DEVICE_ID_DELL_PERC5:
  5175. default:
  5176. instance->instancet = &megasas_instance_template_xscale;
  5177. instance->pd_list_not_supported = 1;
  5178. break;
  5179. }
  5180. }
  5181. if (megasas_transition_to_ready(instance, 0)) {
  5182. dev_info(&instance->pdev->dev,
  5183. "Failed to transition controller to ready from %s!\n",
  5184. __func__);
  5185. if (instance->adapter_type != MFI_SERIES) {
  5186. status_reg = instance->instancet->read_fw_status_reg(
  5187. instance);
  5188. if (status_reg & MFI_RESET_ADAPTER) {
  5189. if (megasas_adp_reset_wait_for_ready
  5190. (instance, true, 0) == FAILED)
  5191. goto fail_ready_state;
  5192. } else {
  5193. goto fail_ready_state;
  5194. }
  5195. } else {
  5196. atomic_set(&instance->fw_reset_no_pci_access, 1);
  5197. instance->instancet->adp_reset
  5198. (instance, instance->reg_set);
  5199. atomic_set(&instance->fw_reset_no_pci_access, 0);
5200. /* wait for about 30 seconds before retrying */
  5201. ssleep(30);
  5202. if (megasas_transition_to_ready(instance, 0))
  5203. goto fail_ready_state;
  5204. }
  5205. dev_info(&instance->pdev->dev,
  5206. "FW restarted successfully from %s!\n",
  5207. __func__);
  5208. }
  5209. megasas_init_ctrl_params(instance);
  5210. if (megasas_set_dma_mask(instance))
  5211. goto fail_ready_state;
  5212. if (megasas_alloc_ctrl_mem(instance))
  5213. goto fail_alloc_dma_buf;
  5214. if (megasas_alloc_ctrl_dma_buffers(instance))
  5215. goto fail_alloc_dma_buf;
  5216. fusion = instance->ctrl_context;
  5217. if (instance->adapter_type >= VENTURA_SERIES) {
  5218. scratch_pad_2 =
  5219. megasas_readl(instance,
  5220. &instance->reg_set->outbound_scratch_pad_2);
  5221. instance->max_raid_mapsize = ((scratch_pad_2 >>
  5222. MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
  5223. MR_MAX_RAID_MAP_SIZE_MASK);
  5224. }
  5225. instance->enable_sdev_max_qd = enable_sdev_max_qd;
  5226. switch (instance->adapter_type) {
  5227. case VENTURA_SERIES:
  5228. fusion->pcie_bw_limitation = true;
  5229. break;
  5230. case AERO_SERIES:
  5231. fusion->r56_div_offload = true;
  5232. break;
  5233. default:
  5234. break;
  5235. }
  5236. /* Check if MSI-X is supported while in ready state */
  5237. msix_enable = (instance->instancet->read_fw_status_reg(instance) &
  5238. 0x4000000) >> 0x1a;
  5239. if (msix_enable && !msix_disable) {
  5240. scratch_pad_1 = megasas_readl
  5241. (instance, &instance->reg_set->outbound_scratch_pad_1);
  5242. /* Check max MSI-X vectors */
  5243. if (fusion) {
  5244. if (instance->adapter_type == THUNDERBOLT_SERIES) {
  5245. /* Thunderbolt Series*/
  5246. instance->msix_vectors = (scratch_pad_1
  5247. & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
  5248. } else {
  5249. instance->msix_vectors = ((scratch_pad_1
  5250. & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
  5251. >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
  5252. /*
  5253. * For Invader series, > 8 MSI-x vectors
  5254. * supported by FW/HW implies combined
  5255. * reply queue mode is enabled.
  5256. * For Ventura series, > 16 MSI-x vectors
  5257. * supported by FW/HW implies combined
  5258. * reply queue mode is enabled.
  5259. */
  5260. switch (instance->adapter_type) {
  5261. case INVADER_SERIES:
  5262. if (instance->msix_vectors > 8)
  5263. instance->msix_combined = true;
  5264. break;
  5265. case AERO_SERIES:
  5266. case VENTURA_SERIES:
  5267. if (instance->msix_vectors > 16)
  5268. instance->msix_combined = true;
  5269. break;
  5270. }
  5271. if (rdpq_enable)
  5272. instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
  5273. 1 : 0;
  5274. if (instance->adapter_type >= INVADER_SERIES &&
  5275. !instance->msix_combined) {
  5276. instance->msix_load_balance = true;
  5277. instance->smp_affinity_enable = false;
  5278. }
5279. /* Save reply post host index addresses 1-15 to local memory
  5280. * Index 0 is already saved from reg offset
  5281. * MPI2_REPLY_POST_HOST_INDEX_OFFSET
  5282. */
  5283. for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
  5284. instance->reply_post_host_index_addr[loop] =
  5285. (u32 __iomem *)
  5286. ((u8 __iomem *)instance->reg_set +
  5287. MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
  5288. + (loop * 0x10));
  5289. }
  5290. }
  5291. dev_info(&instance->pdev->dev,
  5292. "firmware supports msix\t: (%d)",
  5293. instance->msix_vectors);
  5294. if (msix_vectors)
  5295. instance->msix_vectors = min(msix_vectors,
  5296. instance->msix_vectors);
  5297. } else /* MFI adapters */
  5298. instance->msix_vectors = 1;
  5299. /*
5300. * For Aero (if certain conditions are met), the driver will configure a
5301. * few additional reply queues with interrupt coalescing enabled.
5302. * These queues with interrupt coalescing enabled are called
5303. * High IOPS queues and the rest of the reply queues (based on the
5304. * number of logical CPUs) are termed Low latency queues.
5305. *
5306. * Total number of reply queues = High IOPS queues + Low latency queues
5307. *
5308. * For the rest of the fusion adapters, 1 additional reply queue will be
5309. * reserved for management commands, and the remaining reply queues
5310. * (based on the number of logical CPUs) will be used for IOs and
5311. * referenced as IO queues.
5312. * Total number of reply queues = 1 + IO queues
5313. *
5314. * MFI adapters support a single MSI-X vector, so a single reply queue
5315. * will be used for both IO and management commands.
  5316. */
  5317. intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
  5318. true : false;
  5319. if (intr_coalescing &&
  5320. (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
  5321. (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
  5322. instance->perf_mode = MR_BALANCED_PERF_MODE;
  5323. else
  5324. instance->perf_mode = MR_LATENCY_PERF_MODE;
  5325. if (instance->adapter_type == AERO_SERIES) {
  5326. pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
  5327. speed = lnksta & PCI_EXP_LNKSTA_CLS;
  5328. /*
  5329. * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
  5330. * in latency perf mode and enable R1 PCI bandwidth algorithm
  5331. */
  5332. if (speed < 0x4) {
  5333. instance->perf_mode = MR_LATENCY_PERF_MODE;
  5334. fusion->pcie_bw_limitation = true;
  5335. }
  5336. /*
5337. * Performance mode settings provided through the module parameter perf_mode
5338. * take effect only for:
5339. * 1. The Aero family of adapters.
5340. * 2. Cases where the user sets perf_mode in the range 0-2.
  5341. */
  5342. if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
  5343. (perf_mode <= MR_LATENCY_PERF_MODE))
  5344. instance->perf_mode = perf_mode;
  5345. /*
  5346. * If intr coalescing is not supported by controller FW, then IOPS
  5347. * and Balanced modes are not feasible.
  5348. */
  5349. if (!intr_coalescing)
  5350. instance->perf_mode = MR_LATENCY_PERF_MODE;
  5351. }
  5352. if (instance->perf_mode == MR_BALANCED_PERF_MODE)
  5353. instance->low_latency_index_start =
  5354. MR_HIGH_IOPS_QUEUE_COUNT;
  5355. else
  5356. instance->low_latency_index_start = 1;
  5357. num_msix_req = num_online_cpus() + instance->low_latency_index_start;
  5358. instance->msix_vectors = min(num_msix_req,
  5359. instance->msix_vectors);
  5360. megasas_alloc_irq_vectors(instance);
  5361. if (!instance->msix_vectors)
  5362. instance->msix_load_balance = false;
  5363. }
  5364. /*
5365. * MSI-X host index 0 is common for all adapters.
5366. * It is used for all MPT based adapters.
  5367. */
  5368. if (instance->msix_combined) {
  5369. instance->reply_post_host_index_addr[0] =
  5370. (u32 *)((u8 *)instance->reg_set +
  5371. MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
  5372. } else {
  5373. instance->reply_post_host_index_addr[0] =
  5374. (u32 *)((u8 *)instance->reg_set +
  5375. MPI2_REPLY_POST_HOST_INDEX_OFFSET);
  5376. }
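/* MSI-X is unavailable or disabled; fall back to a single legacy INTx vector. */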
  5377. if (!instance->msix_vectors) {
  5378. i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
  5379. if (i < 0)
  5380. goto fail_init_adapter;
  5381. }
  5382. megasas_setup_reply_map(instance);
  5383. dev_info(&instance->pdev->dev,
  5384. "current msix/online cpus\t: (%d/%d)\n",
  5385. instance->msix_vectors, (unsigned int)num_online_cpus());
  5386. dev_info(&instance->pdev->dev,
  5387. "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
  5388. tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
  5389. (unsigned long)instance);
  5390. /*
5391. * Below are the default values for legacy firmware
5392. * (non-fusion based controllers).
  5393. */
  5394. instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
  5395. instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
  5396. /* Get operational params, sge flags, send init cmd to controller */
  5397. if (instance->instancet->init_adapter(instance))
  5398. goto fail_init_adapter;
  5399. if (instance->adapter_type >= VENTURA_SERIES) {
  5400. scratch_pad_3 =
  5401. megasas_readl(instance,
  5402. &instance->reg_set->outbound_scratch_pad_3);
  5403. if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
  5404. MR_DEFAULT_NVME_PAGE_SHIFT)
  5405. instance->nvme_page_size =
  5406. (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
  5407. dev_info(&instance->pdev->dev,
  5408. "NVME page size\t: (%d)\n", instance->nvme_page_size);
  5409. }
  5410. if (instance->msix_vectors ?
  5411. megasas_setup_irqs_msix(instance, 1) :
  5412. megasas_setup_irqs_ioapic(instance))
  5413. goto fail_init_adapter;
  5414. if (instance->adapter_type != MFI_SERIES)
  5415. megasas_setup_irq_poll(instance);
  5416. instance->instancet->enable_intr(instance);
  5417. dev_info(&instance->pdev->dev, "INIT adapter done\n");
  5418. megasas_setup_jbod_map(instance);
  5419. if (megasas_get_device_list(instance) != SUCCESS) {
  5420. dev_err(&instance->pdev->dev,
  5421. "%s: megasas_get_device_list failed\n",
  5422. __func__);
  5423. goto fail_get_ld_pd_list;
  5424. }
  5425. /* stream detection initialization */
  5426. if (instance->adapter_type >= VENTURA_SERIES) {
  5427. fusion->stream_detect_by_ld =
  5428. kcalloc(MAX_LOGICAL_DRIVES_EXT,
  5429. sizeof(struct LD_STREAM_DETECT *),
  5430. GFP_KERNEL);
  5431. if (!fusion->stream_detect_by_ld) {
  5432. dev_err(&instance->pdev->dev,
  5433. "unable to allocate stream detection for pool of LDs\n");
  5434. goto fail_get_ld_pd_list;
  5435. }
  5436. for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
  5437. fusion->stream_detect_by_ld[i] =
  5438. kzalloc(sizeof(struct LD_STREAM_DETECT),
  5439. GFP_KERNEL);
  5440. if (!fusion->stream_detect_by_ld[i]) {
  5441. dev_err(&instance->pdev->dev,
  5442. "unable to allocate stream detect by LD\n ");
  5443. for (j = 0; j < i; ++j)
  5444. kfree(fusion->stream_detect_by_ld[j]);
  5445. kfree(fusion->stream_detect_by_ld);
  5446. fusion->stream_detect_by_ld = NULL;
  5447. goto fail_get_ld_pd_list;
  5448. }
  5449. fusion->stream_detect_by_ld[i]->mru_bit_map
  5450. = MR_STREAM_BITMAP;
  5451. }
  5452. }
  5453. /*
  5454. * Compute the max allowed sectors per IO: The controller info has two
  5455. * limits on max sectors. Driver should use the minimum of these two.
  5456. *
  5457. * 1 << stripe_sz_ops.min = max sectors per strip
  5458. *
5459. * Note that older firmware (< FW ver 30) didn't report the information
5460. * needed to calculate max_sectors_1, so that value always ended up as zero.
  5461. */
  5462. tmp_sectors = 0;
  5463. ctrl_info = instance->ctrl_info_buf;
  5464. max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
  5465. le16_to_cpu(ctrl_info->max_strips_per_io);
  5466. max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
  5467. tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
  5468. instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
  5469. instance->passive = ctrl_info->cluster.passive;
  5470. memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
  5471. instance->UnevenSpanSupport =
  5472. ctrl_info->adapterOperations2.supportUnevenSpans;
  5473. if (instance->UnevenSpanSupport) {
  5474. struct fusion_context *fusion = instance->ctrl_context;
  5475. if (MR_ValidateMapInfo(instance, instance->map_id))
  5476. fusion->fast_path_io = 1;
  5477. else
  5478. fusion->fast_path_io = 0;
  5479. }
  5480. if (ctrl_info->host_interface.SRIOV) {
  5481. instance->requestorId = ctrl_info->iov.requestorId;
  5482. if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
  5483. if (!ctrl_info->adapterOperations2.activePassive)
  5484. instance->PlasmaFW111 = 1;
  5485. dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
  5486. instance->PlasmaFW111 ? "1.11" : "new");
  5487. if (instance->PlasmaFW111) {
  5488. iovPtr = (struct IOV_111 *)
  5489. ((unsigned char *)ctrl_info + IOV_111_OFFSET);
  5490. instance->requestorId = iovPtr->requestorId;
  5491. }
  5492. }
  5493. dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
  5494. instance->requestorId);
  5495. }
  5496. instance->crash_dump_fw_support =
  5497. ctrl_info->adapterOperations3.supportCrashDump;
  5498. instance->crash_dump_drv_support =
  5499. (instance->crash_dump_fw_support &&
  5500. instance->crash_dump_buf);
  5501. if (instance->crash_dump_drv_support)
  5502. megasas_set_crash_dump_params(instance,
  5503. MR_CRASH_BUF_TURN_OFF);
  5504. else {
  5505. if (instance->crash_dump_buf)
  5506. dma_free_coherent(&instance->pdev->dev,
  5507. CRASH_DMA_BUF_SIZE,
  5508. instance->crash_dump_buf,
  5509. instance->crash_dump_h);
  5510. instance->crash_dump_buf = NULL;
  5511. }
  5512. if (instance->snapdump_wait_time) {
  5513. megasas_get_snapdump_properties(instance);
  5514. dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
  5515. instance->snapdump_wait_time);
  5516. }
  5517. dev_info(&instance->pdev->dev,
  5518. "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
  5519. le16_to_cpu(ctrl_info->pci.vendor_id),
  5520. le16_to_cpu(ctrl_info->pci.device_id),
  5521. le16_to_cpu(ctrl_info->pci.sub_vendor_id),
  5522. le16_to_cpu(ctrl_info->pci.sub_device_id));
  5523. dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
  5524. instance->UnevenSpanSupport ? "yes" : "no");
  5525. dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
  5526. instance->crash_dump_drv_support ? "yes" : "no");
  5527. dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n",
  5528. instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
  5529. instance->max_sectors_per_req = instance->max_num_sge *
  5530. SGE_BUFFER_SIZE / 512;
  5531. if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
  5532. instance->max_sectors_per_req = tmp_sectors;
  5533. /* Check for valid throttlequeuedepth module parameter */
  5534. if (throttlequeuedepth &&
  5535. throttlequeuedepth <= instance->max_scsi_cmds)
  5536. instance->throttlequeuedepth = throttlequeuedepth;
  5537. else
  5538. instance->throttlequeuedepth =
  5539. MEGASAS_THROTTLE_QUEUE_DEPTH;
  5540. if ((resetwaittime < 1) ||
  5541. (resetwaittime > MEGASAS_RESET_WAIT_TIME))
  5542. resetwaittime = MEGASAS_RESET_WAIT_TIME;
  5543. if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
  5544. scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
  5545. /* Launch SR-IOV heartbeat timer */
  5546. if (instance->requestorId) {
  5547. if (!megasas_sriov_start_heartbeat(instance, 1)) {
  5548. megasas_start_timer(instance);
  5549. } else {
  5550. instance->skip_heartbeat_timer_del = 1;
  5551. goto fail_get_ld_pd_list;
  5552. }
  5553. }
  5554. /*
  5555. * Create and start watchdog thread which will monitor
  5556. * controller state every 1 sec and trigger OCR when
  5557. * it enters fault state
  5558. */
  5559. if (instance->adapter_type != MFI_SERIES)
  5560. if (megasas_fusion_start_watchdog(instance) != SUCCESS)
  5561. goto fail_start_watchdog;
  5562. return 0;
  5563. fail_start_watchdog:
  5564. if (instance->requestorId && !instance->skip_heartbeat_timer_del)
  5565. del_timer_sync(&instance->sriov_heartbeat_timer);
  5566. fail_get_ld_pd_list:
  5567. instance->instancet->disable_intr(instance);
  5568. megasas_destroy_irqs(instance);
  5569. fail_init_adapter:
  5570. if (instance->msix_vectors)
  5571. pci_free_irq_vectors(instance->pdev);
  5572. instance->msix_vectors = 0;
  5573. fail_alloc_dma_buf:
  5574. megasas_free_ctrl_dma_buffers(instance);
  5575. megasas_free_ctrl_mem(instance);
  5576. fail_ready_state:
  5577. iounmap(instance->reg_set);
  5578. fail_ioremap:
  5579. pci_release_selected_regions(instance->pdev, 1<<instance->bar);
  5580. dev_err(&instance->pdev->dev, "Failed from %s %d\n",
  5581. __func__, __LINE__);
  5582. return -EINVAL;
  5583. }
  5584. /**
  5585. * megasas_release_mfi - Reverses the FW initialization
  5586. * @instance: Adapter soft state
  5587. */
  5588. static void megasas_release_mfi(struct megasas_instance *instance)
  5589. {
  5590. u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
  5591. if (instance->reply_queue)
  5592. dma_free_coherent(&instance->pdev->dev, reply_q_sz,
  5593. instance->reply_queue, instance->reply_queue_h);
  5594. megasas_free_cmds(instance);
  5595. iounmap(instance->reg_set);
  5596. pci_release_selected_regions(instance->pdev, 1<<instance->bar);
  5597. }
  5598. /**
  5599. * megasas_get_seq_num - Gets latest event sequence numbers
  5600. * @instance: Adapter soft state
  5601. * @eli: FW event log sequence numbers information
  5602. *
  5603. * FW maintains a log of all events in a non-volatile area. Upper layers would
  5604. * usually find out the latest sequence number of the events, the seq number at
  5605. * the boot etc. They would "read" all the events below the latest seq number
  5606. * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
5607. * number), they would subscribe to AEN (asynchronous event notification) and
  5608. * wait for the events to happen.
  5609. */
  5610. static int
  5611. megasas_get_seq_num(struct megasas_instance *instance,
  5612. struct megasas_evt_log_info *eli)
  5613. {
  5614. struct megasas_cmd *cmd;
  5615. struct megasas_dcmd_frame *dcmd;
  5616. struct megasas_evt_log_info *el_info;
  5617. dma_addr_t el_info_h = 0;
  5618. int ret;
  5619. cmd = megasas_get_cmd(instance);
  5620. if (!cmd) {
  5621. return -ENOMEM;
  5622. }
  5623. dcmd = &cmd->frame->dcmd;
  5624. el_info = dma_alloc_coherent(&instance->pdev->dev,
  5625. sizeof(struct megasas_evt_log_info),
  5626. &el_info_h, GFP_KERNEL);
  5627. if (!el_info) {
  5628. megasas_return_cmd(instance, cmd);
  5629. return -ENOMEM;
  5630. }
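/* Prepare the MR_DCMD_CTRL_EVENT_GET_INFO frame to read the event log info. */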
  5631. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  5632. dcmd->cmd = MFI_CMD_DCMD;
  5633. dcmd->cmd_status = 0x0;
  5634. dcmd->sge_count = 1;
  5635. dcmd->flags = MFI_FRAME_DIR_READ;
  5636. dcmd->timeout = 0;
  5637. dcmd->pad_0 = 0;
  5638. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
  5639. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
  5640. megasas_set_dma_settings(instance, dcmd, el_info_h,
  5641. sizeof(struct megasas_evt_log_info));
  5642. ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
  5643. if (ret != DCMD_SUCCESS) {
  5644. dev_err(&instance->pdev->dev, "Failed from %s %d\n",
  5645. __func__, __LINE__);
  5646. goto dcmd_failed;
  5647. }
  5648. /*
5649. * Copy the data back into the caller's buffer
  5650. */
  5651. eli->newest_seq_num = el_info->newest_seq_num;
  5652. eli->oldest_seq_num = el_info->oldest_seq_num;
  5653. eli->clear_seq_num = el_info->clear_seq_num;
  5654. eli->shutdown_seq_num = el_info->shutdown_seq_num;
  5655. eli->boot_seq_num = el_info->boot_seq_num;
  5656. dcmd_failed:
  5657. dma_free_coherent(&instance->pdev->dev,
  5658. sizeof(struct megasas_evt_log_info),
  5659. el_info, el_info_h);
  5660. megasas_return_cmd(instance, cmd);
  5661. return ret;
  5662. }
  5663. /**
  5664. * megasas_register_aen - Registers for asynchronous event notification
  5665. * @instance: Adapter soft state
  5666. * @seq_num: The starting sequence number
  5667. * @class_locale_word: Class of the event
  5668. *
  5669. * This function subscribes for AEN for events beyond the @seq_num. It requests
  5670. * to be notified if and only if the event is of type @class_locale
  5671. */
  5672. static int
  5673. megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
  5674. u32 class_locale_word)
  5675. {
  5676. int ret_val;
  5677. struct megasas_cmd *cmd;
  5678. struct megasas_dcmd_frame *dcmd;
  5679. union megasas_evt_class_locale curr_aen;
  5680. union megasas_evt_class_locale prev_aen;
  5681. /*
5682. * If there is an AEN pending already (aen_cmd), check if the
  5683. * class_locale of that pending AEN is inclusive of the new
  5684. * AEN request we currently have. If it is, then we don't have
  5685. * to do anything. In other words, whichever events the current
  5686. * AEN request is subscribing to, have already been subscribed
  5687. * to.
  5688. *
  5689. * If the old_cmd is _not_ inclusive, then we have to abort
  5690. * that command, form a class_locale that is superset of both
  5691. * old and current and re-issue to the FW
  5692. */
  5693. curr_aen.word = class_locale_word;
  5694. if (instance->aen_cmd) {
  5695. prev_aen.word =
  5696. le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
  5697. if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
  5698. (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
  5699. dev_info(&instance->pdev->dev,
  5700. "%s %d out of range class %d send by application\n",
  5701. __func__, __LINE__, curr_aen.members.class);
  5702. return 0;
  5703. }
  5704. /*
  5705. * A class whose enum value is smaller is inclusive of all
  5706. * higher values. If a PROGRESS (= -1) was previously
5707. * registered, then new registration requests for higher
5708. * classes need not be sent to FW. They are automatically
  5709. * included.
  5710. *
5711. * Locale numbers don't have such a hierarchy; they are bitmap
5712. * values.
  5713. */
  5714. if ((prev_aen.members.class <= curr_aen.members.class) &&
  5715. !((prev_aen.members.locale & curr_aen.members.locale) ^
  5716. curr_aen.members.locale)) {
  5717. /*
  5718. * Previously issued event registration includes
  5719. * current request. Nothing to do.
  5720. */
  5721. return 0;
  5722. } else {
  5723. curr_aen.members.locale |= prev_aen.members.locale;
  5724. if (prev_aen.members.class < curr_aen.members.class)
  5725. curr_aen.members.class = prev_aen.members.class;
  5726. instance->aen_cmd->abort_aen = 1;
  5727. ret_val = megasas_issue_blocked_abort_cmd(instance,
  5728. instance->
  5729. aen_cmd, 30);
  5730. if (ret_val) {
  5731. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
  5732. "previous AEN command\n");
  5733. return ret_val;
  5734. }
  5735. }
  5736. }
  5737. cmd = megasas_get_cmd(instance);
  5738. if (!cmd)
  5739. return -ENOMEM;
  5740. dcmd = &cmd->frame->dcmd;
  5741. memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
  5742. /*
  5743. * Prepare DCMD for aen registration
  5744. */
  5745. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  5746. dcmd->cmd = MFI_CMD_DCMD;
  5747. dcmd->cmd_status = 0x0;
  5748. dcmd->sge_count = 1;
  5749. dcmd->flags = MFI_FRAME_DIR_READ;
  5750. dcmd->timeout = 0;
  5751. dcmd->pad_0 = 0;
  5752. dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
  5753. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
  5754. dcmd->mbox.w[0] = cpu_to_le32(seq_num);
  5755. instance->last_seq_num = seq_num;
  5756. dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
  5757. megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
  5758. sizeof(struct megasas_evt_detail));
  5759. if (instance->aen_cmd != NULL) {
  5760. megasas_return_cmd(instance, cmd);
  5761. return 0;
  5762. }
  5763. /*
  5764. * Store reference to the cmd used to register for AEN. When an
  5765. * application wants us to register for AEN, we have to abort this
  5766. * cmd and re-register with a new EVENT LOCALE supplied by that app
  5767. */
  5768. instance->aen_cmd = cmd;
  5769. /*
  5770. * Issue the aen registration frame
  5771. */
  5772. instance->instancet->issue_dcmd(instance, cmd);
  5773. return 0;
  5774. }
  5775. /* megasas_get_target_prop - Send DCMD with below details to firmware.
  5776. *
5777. * This DCMD fetches a few properties of the LD/system PD defined
5778. * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
5779. *
5780. * The DCMD is sent by the driver whenever a new target is added to the OS.
  5781. *
  5782. * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
  5783. * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
  5784. * 0 = system PD, 1 = LD.
  5785. * dcmd.mbox.s[1] - TargetID for LD/system PD.
  5786. * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
  5787. *
  5788. * @instance: Adapter soft state
  5789. * @sdev: OS provided scsi device
  5790. *
  5791. * Returns 0 on success non-zero on failure.
  5792. */
  5793. int
  5794. megasas_get_target_prop(struct megasas_instance *instance,
  5795. struct scsi_device *sdev)
  5796. {
  5797. int ret;
  5798. struct megasas_cmd *cmd;
  5799. struct megasas_dcmd_frame *dcmd;
  5800. u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
  5801. sdev->id;
  5802. cmd = megasas_get_cmd(instance);
  5803. if (!cmd) {
  5804. dev_err(&instance->pdev->dev,
  5805. "Failed to get cmd %s\n", __func__);
  5806. return -ENOMEM;
  5807. }
  5808. dcmd = &cmd->frame->dcmd;
  5809. memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
  5810. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  5811. dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
  5812. dcmd->mbox.s[1] = cpu_to_le16(targetId);
  5813. dcmd->cmd = MFI_CMD_DCMD;
  5814. dcmd->cmd_status = 0xFF;
  5815. dcmd->sge_count = 1;
  5816. dcmd->flags = MFI_FRAME_DIR_READ;
  5817. dcmd->timeout = 0;
  5818. dcmd->pad_0 = 0;
  5819. dcmd->data_xfer_len =
  5820. cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
  5821. dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
  5822. megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
  5823. sizeof(struct MR_TARGET_PROPERTIES));
  5824. if ((instance->adapter_type != MFI_SERIES) &&
  5825. !instance->mask_interrupts)
  5826. ret = megasas_issue_blocked_cmd(instance,
  5827. cmd, MFI_IO_TIMEOUT_SECS);
  5828. else
  5829. ret = megasas_issue_polled(instance, cmd);
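/*
 * On a DCMD timeout decide, based on adapter state, whether to trigger
 * an OCR, kill the HBA, or ignore the timeout; the command is returned
 * to the pool only for the non-timeout cases.
 */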
  5830. switch (ret) {
  5831. case DCMD_TIMEOUT:
  5832. switch (dcmd_timeout_ocr_possible(instance)) {
  5833. case INITIATE_OCR:
  5834. cmd->flags |= DRV_DCMD_SKIP_REFIRE;
  5835. mutex_unlock(&instance->reset_mutex);
  5836. megasas_reset_fusion(instance->host,
  5837. MFI_IO_TIMEOUT_OCR);
  5838. mutex_lock(&instance->reset_mutex);
  5839. break;
  5840. case KILL_ADAPTER:
  5841. megaraid_sas_kill_hba(instance);
  5842. break;
  5843. case IGNORE_TIMEOUT:
  5844. dev_info(&instance->pdev->dev,
  5845. "Ignore DCMD timeout: %s %d\n",
  5846. __func__, __LINE__);
  5847. break;
  5848. }
  5849. break;
  5850. default:
  5851. megasas_return_cmd(instance, cmd);
  5852. }
  5853. if (ret != DCMD_SUCCESS)
  5854. dev_err(&instance->pdev->dev,
  5855. "return from %s %d return value %d\n",
  5856. __func__, __LINE__, ret);
  5857. return ret;
  5858. }
  5859. /**
  5860. * megasas_start_aen - Subscribes to AEN during driver load time
  5861. * @instance: Adapter soft state
  5862. */
  5863. static int megasas_start_aen(struct megasas_instance *instance)
  5864. {
  5865. struct megasas_evt_log_info eli;
  5866. union megasas_evt_class_locale class_locale;
  5867. /*
  5868. * Get the latest sequence number from FW
  5869. */
  5870. memset(&eli, 0, sizeof(eli));
  5871. if (megasas_get_seq_num(instance, &eli))
  5872. return -1;
  5873. /*
  5874. * Register AEN with FW for latest sequence number plus 1
  5875. */
  5876. class_locale.members.reserved = 0;
  5877. class_locale.members.locale = MR_EVT_LOCALE_ALL;
  5878. class_locale.members.class = MR_EVT_CLASS_DEBUG;
  5879. return megasas_register_aen(instance,
  5880. le32_to_cpu(eli.newest_seq_num) + 1,
  5881. class_locale.word);
  5882. }
  5883. /**
  5884. * megasas_io_attach - Attaches this driver to SCSI mid-layer
  5885. * @instance: Adapter soft state
  5886. */
  5887. static int megasas_io_attach(struct megasas_instance *instance)
  5888. {
  5889. struct Scsi_Host *host = instance->host;
  5890. /*
  5891. * Export parameters required by SCSI mid-layer
  5892. */
  5893. host->unique_id = instance->unique_id;
  5894. host->can_queue = instance->max_scsi_cmds;
  5895. host->this_id = instance->init_id;
  5896. host->sg_tablesize = instance->max_num_sge;
  5897. if (instance->fw_support_ieee)
  5898. instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
  5899. /*
  5900. * Check if the module parameter value for max_sectors can be used
  5901. */
  5902. if (max_sectors && max_sectors < instance->max_sectors_per_req)
  5903. instance->max_sectors_per_req = max_sectors;
  5904. else {
  5905. if (max_sectors) {
  5906. if (((instance->pdev->device ==
  5907. PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
  5908. (instance->pdev->device ==
  5909. PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
  5910. (max_sectors <= MEGASAS_MAX_SECTORS)) {
  5911. instance->max_sectors_per_req = max_sectors;
  5912. } else {
5913. dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5914. "and <= %d (or < 1MB for GEN2 controller)\n",
  5915. instance->max_sectors_per_req);
  5916. }
  5917. }
  5918. }
  5919. host->max_sectors = instance->max_sectors_per_req;
  5920. host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
  5921. host->max_channel = MEGASAS_MAX_CHANNELS - 1;
  5922. host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
  5923. host->max_lun = MEGASAS_MAX_LUN;
  5924. host->max_cmd_len = 16;
5925. /* Use shared host tagset only for fusion adapters
  5926. * if there are managed interrupts (smp affinity enabled case).
  5927. * Single msix_vectors in kdump, so shared host tag is also disabled.
  5928. */
  5929. host->host_tagset = 0;
  5930. host->nr_hw_queues = 1;
  5931. if ((instance->adapter_type != MFI_SERIES) &&
  5932. (instance->msix_vectors > instance->low_latency_index_start) &&
  5933. host_tagset_enable &&
  5934. instance->smp_affinity_enable) {
  5935. host->host_tagset = 1;
  5936. host->nr_hw_queues = instance->msix_vectors -
  5937. instance->low_latency_index_start + instance->iopoll_q_count;
  5938. if (instance->iopoll_q_count)
  5939. host->nr_maps = 3;
  5940. } else {
  5941. instance->iopoll_q_count = 0;
  5942. }
  5943. dev_info(&instance->pdev->dev,
  5944. "Max firmware commands: %d shared with default "
  5945. "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds,
  5946. host->nr_hw_queues - instance->iopoll_q_count,
  5947. instance->iopoll_q_count);
  5948. /*
  5949. * Notify the mid-layer about the new controller
  5950. */
  5951. if (scsi_add_host(host, &instance->pdev->dev)) {
  5952. dev_err(&instance->pdev->dev,
  5953. "Failed to add host from %s %d\n",
  5954. __func__, __LINE__);
  5955. return -ENODEV;
  5956. }
  5957. return 0;
  5958. }
  5959. /**
  5960. * megasas_set_dma_mask - Set DMA mask for supported controllers
  5961. *
  5962. * @instance: Adapter soft state
  5963. * Description:
  5964. *
  5965. * For Ventura, driver/FW will operate in 63bit DMA addresses.
  5966. *
5967. * For Invader:
  5968. * By default, driver/FW will operate in 32bit DMA addresses
  5969. * for consistent DMA mapping but if 32 bit consistent
  5970. * DMA mask fails, driver will try with 63 bit consistent
  5971. * mask provided FW is true 63bit DMA capable
  5972. *
5973. * For older controllers (Thunderbolt and MFI based adapters):
  5974. * driver/FW will operate in 32 bit consistent DMA addresses.
  5975. */
  5976. static int
  5977. megasas_set_dma_mask(struct megasas_instance *instance)
  5978. {
  5979. u64 consistent_mask;
  5980. struct pci_dev *pdev;
  5981. u32 scratch_pad_1;
  5982. pdev = instance->pdev;
  5983. consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
  5984. DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
  5985. if (IS_DMA64) {
  5986. if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
  5987. dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
  5988. goto fail_set_dma_mask;
  5989. if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
  5990. (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
  5991. dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
  5992. /*
  5993. * If 32 bit DMA mask fails, then try for 64 bit mask
  5994. * for FW capable of handling 64 bit DMA.
  5995. */
  5996. scratch_pad_1 = megasas_readl
  5997. (instance, &instance->reg_set->outbound_scratch_pad_1);
  5998. if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
  5999. goto fail_set_dma_mask;
  6000. else if (dma_set_mask_and_coherent(&pdev->dev,
  6001. DMA_BIT_MASK(63)))
  6002. goto fail_set_dma_mask;
  6003. }
  6004. } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
  6005. goto fail_set_dma_mask;
  6006. if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
  6007. instance->consistent_mask_64bit = false;
  6008. else
  6009. instance->consistent_mask_64bit = true;
  6010. dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
  6011. ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
  6012. (instance->consistent_mask_64bit ? "63" : "32"));
  6013. return 0;
  6014. fail_set_dma_mask:
  6015. dev_err(&pdev->dev, "Failed to set DMA mask\n");
  6016. return -1;
  6017. }
  6018. /*
  6019. * megasas_set_adapter_type - Set adapter type.
6020. * Supported controllers can be divided into
6021. * different categories:
  6022. * enum MR_ADAPTER_TYPE {
  6023. * MFI_SERIES = 1,
  6024. * THUNDERBOLT_SERIES = 2,
  6025. * INVADER_SERIES = 3,
  6026. * VENTURA_SERIES = 4,
  6027. * AERO_SERIES = 5,
  6028. * };
  6029. * @instance: Adapter soft state
  6030. * return: void
  6031. */
  6032. static inline void megasas_set_adapter_type(struct megasas_instance *instance)
  6033. {
  6034. if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
  6035. (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
  6036. instance->adapter_type = MFI_SERIES;
  6037. } else {
  6038. switch (instance->pdev->device) {
  6039. case PCI_DEVICE_ID_LSI_AERO_10E1:
  6040. case PCI_DEVICE_ID_LSI_AERO_10E2:
  6041. case PCI_DEVICE_ID_LSI_AERO_10E5:
  6042. case PCI_DEVICE_ID_LSI_AERO_10E6:
  6043. instance->adapter_type = AERO_SERIES;
  6044. break;
  6045. case PCI_DEVICE_ID_LSI_VENTURA:
  6046. case PCI_DEVICE_ID_LSI_CRUSADER:
  6047. case PCI_DEVICE_ID_LSI_HARPOON:
  6048. case PCI_DEVICE_ID_LSI_TOMCAT:
  6049. case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
  6050. case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
  6051. instance->adapter_type = VENTURA_SERIES;
  6052. break;
  6053. case PCI_DEVICE_ID_LSI_FUSION:
  6054. case PCI_DEVICE_ID_LSI_PLASMA:
  6055. instance->adapter_type = THUNDERBOLT_SERIES;
  6056. break;
  6057. case PCI_DEVICE_ID_LSI_INVADER:
  6058. case PCI_DEVICE_ID_LSI_INTRUDER:
  6059. case PCI_DEVICE_ID_LSI_INTRUDER_24:
  6060. case PCI_DEVICE_ID_LSI_CUTLASS_52:
  6061. case PCI_DEVICE_ID_LSI_CUTLASS_53:
  6062. case PCI_DEVICE_ID_LSI_FURY:
  6063. instance->adapter_type = INVADER_SERIES;
  6064. break;
  6065. default: /* For all other supported controllers */
  6066. instance->adapter_type = MFI_SERIES;
  6067. break;
  6068. }
  6069. }
  6070. }
  6071. static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
  6072. {
  6073. instance->producer = dma_alloc_coherent(&instance->pdev->dev,
  6074. sizeof(u32), &instance->producer_h, GFP_KERNEL);
  6075. instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
  6076. sizeof(u32), &instance->consumer_h, GFP_KERNEL);
  6077. if (!instance->producer || !instance->consumer) {
  6078. dev_err(&instance->pdev->dev,
  6079. "Failed to allocate memory for producer, consumer\n");
  6080. return -1;
  6081. }
  6082. *instance->producer = 0;
  6083. *instance->consumer = 0;
  6084. return 0;
  6085. }
  6086. /**
  6087. * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
  6088. * structures which are not common across MFI
  6089. * adapters and fusion adapters.
  6090. * For MFI based adapters, allocate producer and
  6091. * consumer buffers. For fusion adapters, allocate
  6092. * memory for fusion context.
  6093. * @instance: Adapter soft state
  6094. * return: 0 for SUCCESS
  6095. */
  6096. static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
  6097. {
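/* Per-CPU reply queue map, sized to cover every possible CPU. */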
  6098. instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
  6099. GFP_KERNEL);
  6100. if (!instance->reply_map)
  6101. return -ENOMEM;
  6102. switch (instance->adapter_type) {
  6103. case MFI_SERIES:
  6104. if (megasas_alloc_mfi_ctrl_mem(instance))
  6105. return -ENOMEM;
  6106. break;
  6107. case AERO_SERIES:
  6108. case VENTURA_SERIES:
  6109. case THUNDERBOLT_SERIES:
  6110. case INVADER_SERIES:
  6111. if (megasas_alloc_fusion_context(instance))
  6112. return -ENOMEM;
  6113. break;
  6114. }
  6115. return 0;
  6116. }
  6117. /*
  6118. * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
  6119. * producer, consumer buffers for MFI adapters
  6120. *
  6121. * @instance - Adapter soft instance
  6122. *
  6123. */
  6124. static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
  6125. {
  6126. kfree(instance->reply_map);
  6127. if (instance->adapter_type == MFI_SERIES) {
  6128. if (instance->producer)
  6129. dma_free_coherent(&instance->pdev->dev, sizeof(u32),
  6130. instance->producer,
  6131. instance->producer_h);
  6132. if (instance->consumer)
  6133. dma_free_coherent(&instance->pdev->dev, sizeof(u32),
  6134. instance->consumer,
  6135. instance->consumer_h);
  6136. } else {
  6137. megasas_free_fusion_context(instance);
  6138. }
  6139. }
  6140. /**
  6141. * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
  6142. * driver load time
  6143. *
  6144. * @instance: Adapter soft instance
  6145. *
6146. * @return: 0 for SUCCESS
  6147. */
  6148. static inline
  6149. int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
  6150. {
  6151. struct pci_dev *pdev = instance->pdev;
  6152. struct fusion_context *fusion = instance->ctrl_context;
  6153. instance->evt_detail = dma_alloc_coherent(&pdev->dev,
  6154. sizeof(struct megasas_evt_detail),
  6155. &instance->evt_detail_h, GFP_KERNEL);
  6156. if (!instance->evt_detail) {
  6157. dev_err(&instance->pdev->dev,
  6158. "Failed to allocate event detail buffer\n");
  6159. return -ENOMEM;
  6160. }
  6161. if (fusion) {
  6162. fusion->ioc_init_request =
  6163. dma_alloc_coherent(&pdev->dev,
  6164. sizeof(struct MPI2_IOC_INIT_REQUEST),
  6165. &fusion->ioc_init_request_phys,
  6166. GFP_KERNEL);
  6167. if (!fusion->ioc_init_request) {
  6168. dev_err(&pdev->dev,
  6169. "Failed to allocate ioc init request\n");
  6170. return -ENOMEM;
  6171. }
  6172. instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
  6173. sizeof(struct MR_SNAPDUMP_PROPERTIES),
  6174. &instance->snapdump_prop_h, GFP_KERNEL);
  6175. if (!instance->snapdump_prop)
  6176. dev_err(&pdev->dev,
  6177. "Failed to allocate snapdump properties buffer\n");
  6178. instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
  6179. HOST_DEVICE_LIST_SZ,
  6180. &instance->host_device_list_buf_h,
  6181. GFP_KERNEL);
  6182. if (!instance->host_device_list_buf) {
  6183. dev_err(&pdev->dev,
  6184. "Failed to allocate targetid list buffer\n");
  6185. return -ENOMEM;
  6186. }
  6187. }
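/* The buffers below are required for all adapter types. */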
  6188. instance->pd_list_buf =
  6189. dma_alloc_coherent(&pdev->dev,
  6190. MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
  6191. &instance->pd_list_buf_h, GFP_KERNEL);
  6192. if (!instance->pd_list_buf) {
  6193. dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
  6194. return -ENOMEM;
  6195. }
  6196. instance->ctrl_info_buf =
  6197. dma_alloc_coherent(&pdev->dev,
  6198. sizeof(struct megasas_ctrl_info),
  6199. &instance->ctrl_info_buf_h, GFP_KERNEL);
  6200. if (!instance->ctrl_info_buf) {
  6201. dev_err(&pdev->dev,
  6202. "Failed to allocate controller info buffer\n");
  6203. return -ENOMEM;
  6204. }
  6205. instance->ld_list_buf =
  6206. dma_alloc_coherent(&pdev->dev,
  6207. sizeof(struct MR_LD_LIST),
  6208. &instance->ld_list_buf_h, GFP_KERNEL);
  6209. if (!instance->ld_list_buf) {
  6210. dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
  6211. return -ENOMEM;
  6212. }
  6213. instance->ld_targetid_list_buf =
  6214. dma_alloc_coherent(&pdev->dev,
  6215. sizeof(struct MR_LD_TARGETID_LIST),
  6216. &instance->ld_targetid_list_buf_h, GFP_KERNEL);
  6217. if (!instance->ld_targetid_list_buf) {
  6218. dev_err(&pdev->dev,
  6219. "Failed to allocate LD targetid list buffer\n");
  6220. return -ENOMEM;
  6221. }
  6222. if (!reset_devices) {
  6223. instance->system_info_buf =
  6224. dma_alloc_coherent(&pdev->dev,
  6225. sizeof(struct MR_DRV_SYSTEM_INFO),
  6226. &instance->system_info_h, GFP_KERNEL);
  6227. instance->pd_info =
  6228. dma_alloc_coherent(&pdev->dev,
  6229. sizeof(struct MR_PD_INFO),
  6230. &instance->pd_info_h, GFP_KERNEL);
  6231. instance->tgt_prop =
  6232. dma_alloc_coherent(&pdev->dev,
  6233. sizeof(struct MR_TARGET_PROPERTIES),
  6234. &instance->tgt_prop_h, GFP_KERNEL);
  6235. instance->crash_dump_buf =
  6236. dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
  6237. &instance->crash_dump_h, GFP_KERNEL);
  6238. if (!instance->system_info_buf)
  6239. dev_err(&instance->pdev->dev,
  6240. "Failed to allocate system info buffer\n");
  6241. if (!instance->pd_info)
  6242. dev_err(&instance->pdev->dev,
  6243. "Failed to allocate pd_info buffer\n");
  6244. if (!instance->tgt_prop)
  6245. dev_err(&instance->pdev->dev,
  6246. "Failed to allocate tgt_prop buffer\n");
  6247. if (!instance->crash_dump_buf)
  6248. dev_err(&instance->pdev->dev,
  6249. "Failed to allocate crash dump buffer\n");
  6250. }
  6251. return 0;
  6252. }
  6253. /*
  6254. * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
  6255. * during driver load time
  6256. *
  6257. * @instance- Adapter soft instance
  6258. *
  6259. */
  6260. static inline
  6261. void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
  6262. {
  6263. struct pci_dev *pdev = instance->pdev;
  6264. struct fusion_context *fusion = instance->ctrl_context;
  6265. if (instance->evt_detail)
  6266. dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
  6267. instance->evt_detail,
  6268. instance->evt_detail_h);
  6269. if (fusion && fusion->ioc_init_request)
  6270. dma_free_coherent(&pdev->dev,
  6271. sizeof(struct MPI2_IOC_INIT_REQUEST),
  6272. fusion->ioc_init_request,
  6273. fusion->ioc_init_request_phys);
  6274. if (instance->pd_list_buf)
  6275. dma_free_coherent(&pdev->dev,
  6276. MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
  6277. instance->pd_list_buf,
  6278. instance->pd_list_buf_h);
  6279. if (instance->ld_list_buf)
  6280. dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
  6281. instance->ld_list_buf,
  6282. instance->ld_list_buf_h);
  6283. if (instance->ld_targetid_list_buf)
  6284. dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
  6285. instance->ld_targetid_list_buf,
  6286. instance->ld_targetid_list_buf_h);
  6287. if (instance->ctrl_info_buf)
  6288. dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
  6289. instance->ctrl_info_buf,
  6290. instance->ctrl_info_buf_h);
  6291. if (instance->system_info_buf)
  6292. dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
  6293. instance->system_info_buf,
  6294. instance->system_info_h);
  6295. if (instance->pd_info)
  6296. dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
  6297. instance->pd_info, instance->pd_info_h);
  6298. if (instance->tgt_prop)
  6299. dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
  6300. instance->tgt_prop, instance->tgt_prop_h);
  6301. if (instance->crash_dump_buf)
  6302. dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
  6303. instance->crash_dump_buf,
  6304. instance->crash_dump_h);
  6305. if (instance->snapdump_prop)
  6306. dma_free_coherent(&pdev->dev,
  6307. sizeof(struct MR_SNAPDUMP_PROPERTIES),
  6308. instance->snapdump_prop,
  6309. instance->snapdump_prop_h);
  6310. if (instance->host_device_list_buf)
  6311. dma_free_coherent(&pdev->dev,
  6312. HOST_DEVICE_LIST_SZ,
  6313. instance->host_device_list_buf,
  6314. instance->host_device_list_buf_h);
  6315. }
  6316. /*
  6317. * megasas_init_ctrl_params - Initialize controller's instance
  6318. * parameters before FW init
  6319. * @instance - Adapter soft instance
  6320. * @return - void
  6321. */
  6322. static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
  6323. {
  6324. instance->fw_crash_state = UNAVAILABLE;
  6325. megasas_poll_wait_aen = 0;
  6326. instance->issuepend_done = 1;
  6327. atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
  6328. /*
  6329. * Initialize locks and queues
  6330. */
  6331. INIT_LIST_HEAD(&instance->cmd_pool);
  6332. INIT_LIST_HEAD(&instance->internal_reset_pending_q);
  6333. atomic_set(&instance->fw_outstanding, 0);
  6334. atomic64_set(&instance->total_io_count, 0);
  6335. init_waitqueue_head(&instance->int_cmd_wait_q);
  6336. init_waitqueue_head(&instance->abort_cmd_wait_q);
  6337. mutex_init(&instance->crashdump_lock);
  6338. spin_lock_init(&instance->mfi_pool_lock);
  6339. spin_lock_init(&instance->hba_lock);
  6340. spin_lock_init(&instance->stream_lock);
  6341. spin_lock_init(&instance->completion_lock);
  6342. mutex_init(&instance->reset_mutex);
  6343. if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
  6344. (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
  6345. instance->flag_ieee = 1;
  6346. instance->flag = 0;
  6347. instance->unload = 1;
  6348. instance->last_time = 0;
  6349. instance->disableOnlineCtrlReset = 1;
  6350. instance->UnevenSpanSupport = 0;
  6351. instance->smp_affinity_enable = smp_affinity_enable ? true : false;
  6352. instance->msix_load_balance = false;
  6353. if (instance->adapter_type != MFI_SERIES)
  6354. INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
  6355. else
  6356. INIT_WORK(&instance->work_init, process_fw_state_change_wq);
  6357. }
  6358. /**
  6359. * megasas_probe_one - PCI hotplug entry point
  6360. * @pdev: PCI device structure
  6361. * @id: PCI ids of supported hotplugged adapter
  6362. */
  6363. static int megasas_probe_one(struct pci_dev *pdev,
  6364. const struct pci_device_id *id)
  6365. {
  6366. int rval, pos;
  6367. struct Scsi_Host *host;
  6368. struct megasas_instance *instance;
  6369. u16 control = 0;
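/*
 * Aero adapters that report a non-secure PCI ID are rejected here;
 * only the configurable secure IDs are allowed to continue probing.
 */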
  6370. switch (pdev->device) {
  6371. case PCI_DEVICE_ID_LSI_AERO_10E0:
  6372. case PCI_DEVICE_ID_LSI_AERO_10E3:
  6373. case PCI_DEVICE_ID_LSI_AERO_10E4:
  6374. case PCI_DEVICE_ID_LSI_AERO_10E7:
  6375. dev_err(&pdev->dev, "Adapter is in non secure mode\n");
  6376. return 1;
  6377. case PCI_DEVICE_ID_LSI_AERO_10E1:
  6378. case PCI_DEVICE_ID_LSI_AERO_10E5:
  6379. dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
  6380. break;
  6381. }
  6382. /* Reset MSI-X in the kdump kernel */
  6383. if (reset_devices) {
  6384. pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
  6385. if (pos) {
  6386. pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
  6387. &control);
  6388. if (control & PCI_MSIX_FLAGS_ENABLE) {
  6389. dev_info(&pdev->dev, "resetting MSI-X\n");
  6390. pci_write_config_word(pdev,
  6391. pos + PCI_MSIX_FLAGS,
  6392. control &
  6393. ~PCI_MSIX_FLAGS_ENABLE);
  6394. }
  6395. }
  6396. }
  6397. /*
6398. * PCI prepping: enable device, set bus mastering and DMA mask
  6399. */
  6400. rval = pci_enable_device_mem(pdev);
  6401. if (rval) {
  6402. return rval;
  6403. }
  6404. pci_set_master(pdev);
  6405. host = scsi_host_alloc(&megasas_template,
  6406. sizeof(struct megasas_instance));
  6407. if (!host) {
  6408. dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
  6409. goto fail_alloc_instance;
  6410. }
  6411. instance = (struct megasas_instance *)host->hostdata;
  6412. memset(instance, 0, sizeof(*instance));
  6413. atomic_set(&instance->fw_reset_no_pci_access, 0);
  6414. /*
  6415. * Initialize PCI related and misc parameters
  6416. */
  6417. instance->pdev = pdev;
  6418. instance->host = host;
  6419. instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
  6420. instance->init_id = MEGASAS_DEFAULT_INIT_ID;
  6421. megasas_set_adapter_type(instance);
  6422. /*
  6423. * Initialize MFI Firmware
  6424. */
  6425. if (megasas_init_fw(instance))
  6426. goto fail_init_mfi;
  6427. if (instance->requestorId) {
  6428. if (instance->PlasmaFW111) {
  6429. instance->vf_affiliation_111 =
  6430. dma_alloc_coherent(&pdev->dev,
  6431. sizeof(struct MR_LD_VF_AFFILIATION_111),
  6432. &instance->vf_affiliation_111_h,
  6433. GFP_KERNEL);
  6434. if (!instance->vf_affiliation_111)
  6435. dev_warn(&pdev->dev, "Can't allocate "
  6436. "memory for VF affiliation buffer\n");
  6437. } else {
  6438. instance->vf_affiliation =
  6439. dma_alloc_coherent(&pdev->dev,
  6440. (MAX_LOGICAL_DRIVES + 1) *
  6441. sizeof(struct MR_LD_VF_AFFILIATION),
  6442. &instance->vf_affiliation_h,
  6443. GFP_KERNEL);
  6444. if (!instance->vf_affiliation)
  6445. dev_warn(&pdev->dev, "Can't allocate "
  6446. "memory for VF affiliation buffer\n");
  6447. }
  6448. }
  6449. /*
  6450. * Store instance in PCI softstate
  6451. */
  6452. pci_set_drvdata(pdev, instance);
  6453. /*
  6454. * Add this controller to megasas_mgmt_info structure so that it
  6455. * can be exported to management applications
  6456. */
  6457. megasas_mgmt_info.count++;
  6458. megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
  6459. megasas_mgmt_info.max_index++;
  6460. /*
  6461. * Register with SCSI mid-layer
  6462. */
  6463. if (megasas_io_attach(instance))
  6464. goto fail_io_attach;
  6465. instance->unload = 0;
  6466. /*
  6467. * Trigger SCSI to scan our drives
  6468. */
  6469. if (!instance->enable_fw_dev_list ||
  6470. (instance->host_device_list_buf->count > 0))
  6471. scsi_scan_host(host);
  6472. /*
  6473. * Initiate AEN (Asynchronous Event Notification)
  6474. */
  6475. if (megasas_start_aen(instance)) {
  6476. dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
  6477. goto fail_start_aen;
  6478. }
  6479. megasas_setup_debugfs(instance);
  6480. /* Get current SR-IOV LD/VF affiliation */
  6481. if (instance->requestorId)
  6482. megasas_get_ld_vf_affiliation(instance, 1);
  6483. return 0;
  6484. fail_start_aen:
  6485. instance->unload = 1;
  6486. scsi_remove_host(instance->host);
  6487. fail_io_attach:
  6488. megasas_mgmt_info.count--;
  6489. megasas_mgmt_info.max_index--;
  6490. megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
  6491. if (instance->requestorId && !instance->skip_heartbeat_timer_del)
  6492. del_timer_sync(&instance->sriov_heartbeat_timer);
  6493. instance->instancet->disable_intr(instance);
  6494. megasas_destroy_irqs(instance);
  6495. if (instance->adapter_type != MFI_SERIES)
  6496. megasas_release_fusion(instance);
  6497. else
  6498. megasas_release_mfi(instance);
  6499. if (instance->msix_vectors)
  6500. pci_free_irq_vectors(instance->pdev);
  6501. instance->msix_vectors = 0;
  6502. if (instance->fw_crash_state != UNAVAILABLE)
  6503. megasas_free_host_crash_buffer(instance);
  6504. if (instance->adapter_type != MFI_SERIES)
  6505. megasas_fusion_stop_watchdog(instance);
  6506. fail_init_mfi:
  6507. scsi_host_put(host);
  6508. fail_alloc_instance:
  6509. pci_disable_device(pdev);
  6510. return -ENODEV;
  6511. }
  6512. /**
  6513. * megasas_flush_cache - Requests FW to flush all its caches
  6514. * @instance: Adapter soft state
  6515. */
  6516. static void megasas_flush_cache(struct megasas_instance *instance)
  6517. {
  6518. struct megasas_cmd *cmd;
  6519. struct megasas_dcmd_frame *dcmd;
  6520. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
  6521. return;
  6522. cmd = megasas_get_cmd(instance);
  6523. if (!cmd)
  6524. return;
  6525. dcmd = &cmd->frame->dcmd;
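/*
 * Build a cache-flush DCMD with no data transfer: the mailbox asks FW
 * to flush both the controller cache and the disk caches.
 */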
  6526. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  6527. dcmd->cmd = MFI_CMD_DCMD;
  6528. dcmd->cmd_status = 0x0;
  6529. dcmd->sge_count = 0;
  6530. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
  6531. dcmd->timeout = 0;
  6532. dcmd->pad_0 = 0;
  6533. dcmd->data_xfer_len = 0;
  6534. dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
  6535. dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
  6536. if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
  6537. != DCMD_SUCCESS) {
  6538. dev_err(&instance->pdev->dev,
  6539. "return from %s %d\n", __func__, __LINE__);
  6540. return;
  6541. }
  6542. megasas_return_cmd(instance, cmd);
  6543. }
  6544. /**
  6545. * megasas_shutdown_controller - Instructs FW to shutdown the controller
  6546. * @instance: Adapter soft state
  6547. * @opcode: Shutdown/Hibernate
  6548. */
  6549. static void megasas_shutdown_controller(struct megasas_instance *instance,
  6550. u32 opcode)
  6551. {
  6552. struct megasas_cmd *cmd;
  6553. struct megasas_dcmd_frame *dcmd;
  6554. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
  6555. return;
  6556. cmd = megasas_get_cmd(instance);
  6557. if (!cmd)
  6558. return;
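/*
 * Abort any outstanding internal commands (AEN, map update, JBOD
 * sequence) before asking the firmware to shut down or hibernate.
 */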
  6559. if (instance->aen_cmd)
  6560. megasas_issue_blocked_abort_cmd(instance,
  6561. instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
  6562. if (instance->map_update_cmd)
  6563. megasas_issue_blocked_abort_cmd(instance,
  6564. instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
  6565. if (instance->jbod_seq_cmd)
  6566. megasas_issue_blocked_abort_cmd(instance,
  6567. instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
  6568. dcmd = &cmd->frame->dcmd;
  6569. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  6570. dcmd->cmd = MFI_CMD_DCMD;
  6571. dcmd->cmd_status = 0x0;
  6572. dcmd->sge_count = 0;
  6573. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
  6574. dcmd->timeout = 0;
  6575. dcmd->pad_0 = 0;
  6576. dcmd->data_xfer_len = 0;
  6577. dcmd->opcode = cpu_to_le32(opcode);
  6578. if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
  6579. != DCMD_SUCCESS) {
  6580. dev_err(&instance->pdev->dev,
  6581. "return from %s %d\n", __func__, __LINE__);
  6582. return;
  6583. }
  6584. megasas_return_cmd(instance, cmd);
  6585. }
  6586. /**
  6587. * megasas_suspend - driver suspend entry point
  6588. * @dev: Device structure
  6589. */
  6590. static int __maybe_unused
  6591. megasas_suspend(struct device *dev)
  6592. {
  6593. struct megasas_instance *instance;
  6594. instance = dev_get_drvdata(dev);
  6595. if (!instance)
  6596. return 0;
  6597. instance->unload = 1;
  6598. dev_info(dev, "%s is called\n", __func__);
  6599. /* Shutdown SR-IOV heartbeat timer */
  6600. if (instance->requestorId && !instance->skip_heartbeat_timer_del)
  6601. del_timer_sync(&instance->sriov_heartbeat_timer);
  6602. /* Stop the FW fault detection watchdog */
  6603. if (instance->adapter_type != MFI_SERIES)
  6604. megasas_fusion_stop_watchdog(instance);
  6605. megasas_flush_cache(instance);
  6606. megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6607. /* cancel the delayed work if it is still queued */
  6608. if (instance->ev != NULL) {
  6609. struct megasas_aen_event *ev = instance->ev;
  6610. cancel_delayed_work_sync(&ev->hotplug_work);
  6611. instance->ev = NULL;
  6612. }
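/* Quiesce interrupt handling and release IRQ resources before suspend */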
  6613. tasklet_kill(&instance->isr_tasklet);
  6614. pci_set_drvdata(instance->pdev, instance);
  6615. instance->instancet->disable_intr(instance);
  6616. megasas_destroy_irqs(instance);
  6617. if (instance->msix_vectors)
  6618. pci_free_irq_vectors(instance->pdev);
  6619. return 0;
  6620. }
  6621. /**
6622. * megasas_resume - driver resume entry point
  6623. * @dev: Device structure
  6624. */
  6625. static int __maybe_unused
  6626. megasas_resume(struct device *dev)
  6627. {
  6628. int rval;
  6629. struct Scsi_Host *host;
  6630. struct megasas_instance *instance;
  6631. u32 status_reg;
  6632. instance = dev_get_drvdata(dev);
  6633. if (!instance)
  6634. return 0;
  6635. host = instance->host;
  6636. dev_info(dev, "%s is called\n", __func__);
  6637. /*
  6638. * We expect the FW state to be READY
  6639. */
  6640. if (megasas_transition_to_ready(instance, 0)) {
  6641. dev_info(&instance->pdev->dev,
  6642. "Failed to transition controller to ready from %s!\n",
  6643. __func__);
  6644. if (instance->adapter_type != MFI_SERIES) {
  6645. status_reg =
  6646. instance->instancet->read_fw_status_reg(instance);
  6647. if (!(status_reg & MFI_RESET_ADAPTER) ||
  6648. ((megasas_adp_reset_wait_for_ready
  6649. (instance, true, 0)) == FAILED))
  6650. goto fail_ready_state;
  6651. } else {
  6652. atomic_set(&instance->fw_reset_no_pci_access, 1);
  6653. instance->instancet->adp_reset
  6654. (instance, instance->reg_set);
  6655. atomic_set(&instance->fw_reset_no_pci_access, 0);
  6656. /* waiting for about 30 seconds before retry */
  6657. ssleep(30);
  6658. if (megasas_transition_to_ready(instance, 0))
  6659. goto fail_ready_state;
  6660. }
  6661. dev_info(&instance->pdev->dev,
  6662. "FW restarted successfully from %s!\n",
  6663. __func__);
  6664. }
  6665. if (megasas_set_dma_mask(instance))
  6666. goto fail_set_dma_mask;
  6667. /*
  6668. * Initialize MFI Firmware
  6669. */
  6670. atomic_set(&instance->fw_outstanding, 0);
  6671. atomic_set(&instance->ldio_outstanding, 0);
  6672. /* Now re-enable MSI-X */
  6673. if (instance->msix_vectors)
  6674. megasas_alloc_irq_vectors(instance);
  6675. if (!instance->msix_vectors) {
  6676. rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
  6677. PCI_IRQ_LEGACY);
  6678. if (rval < 0)
  6679. goto fail_reenable_msix;
  6680. }
  6681. megasas_setup_reply_map(instance);
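/*
 * Re-initialize the IOC: fusion adapters reset their reply descriptors
 * and reissue IOC INIT, while MFI adapters reset the producer/consumer
 * indices and reissue INIT_MFI.
 */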
  6682. if (instance->adapter_type != MFI_SERIES) {
  6683. megasas_reset_reply_desc(instance);
  6684. if (megasas_ioc_init_fusion(instance)) {
  6685. megasas_free_cmds(instance);
  6686. megasas_free_cmds_fusion(instance);
  6687. goto fail_init_mfi;
  6688. }
  6689. if (!megasas_get_map_info(instance))
  6690. megasas_sync_map_info(instance);
  6691. } else {
  6692. *instance->producer = 0;
  6693. *instance->consumer = 0;
  6694. if (megasas_issue_init_mfi(instance))
  6695. goto fail_init_mfi;
  6696. }
  6697. if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
  6698. goto fail_init_mfi;
  6699. tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
  6700. (unsigned long)instance);
  6701. if (instance->msix_vectors ?
  6702. megasas_setup_irqs_msix(instance, 0) :
  6703. megasas_setup_irqs_ioapic(instance))
  6704. goto fail_init_mfi;
  6705. if (instance->adapter_type != MFI_SERIES)
  6706. megasas_setup_irq_poll(instance);
  6707. /* Re-launch SR-IOV heartbeat timer */
  6708. if (instance->requestorId) {
  6709. if (!megasas_sriov_start_heartbeat(instance, 0))
  6710. megasas_start_timer(instance);
  6711. else {
  6712. instance->skip_heartbeat_timer_del = 1;
  6713. goto fail_init_mfi;
  6714. }
  6715. }
  6716. instance->instancet->enable_intr(instance);
  6717. megasas_setup_jbod_map(instance);
  6718. instance->unload = 0;
  6719. /*
  6720. * Initiate AEN (Asynchronous Event Notification)
  6721. */
  6722. if (megasas_start_aen(instance))
  6723. dev_err(&instance->pdev->dev, "Start AEN failed\n");
  6724. /* Re-launch FW fault watchdog */
  6725. if (instance->adapter_type != MFI_SERIES)
  6726. if (megasas_fusion_start_watchdog(instance) != SUCCESS)
  6727. goto fail_start_watchdog;
  6728. return 0;
  6729. fail_start_watchdog:
  6730. if (instance->requestorId && !instance->skip_heartbeat_timer_del)
  6731. del_timer_sync(&instance->sriov_heartbeat_timer);
  6732. fail_init_mfi:
  6733. megasas_free_ctrl_dma_buffers(instance);
  6734. megasas_free_ctrl_mem(instance);
  6735. scsi_host_put(host);
  6736. fail_reenable_msix:
  6737. fail_set_dma_mask:
  6738. fail_ready_state:
  6739. return -ENODEV;
  6740. }
  6741. static inline int
  6742. megasas_wait_for_adapter_operational(struct megasas_instance *instance)
  6743. {
  6744. int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
  6745. int i;
  6746. u8 adp_state;
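/*
 * Poll adprecovery once a second until the HBA becomes operational,
 * hits a critical error, or the reset wait time expires.
 */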
  6747. for (i = 0; i < wait_time; i++) {
  6748. adp_state = atomic_read(&instance->adprecovery);
  6749. if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
  6750. (adp_state == MEGASAS_HW_CRITICAL_ERROR))
  6751. break;
  6752. if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
  6753. dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
  6754. msleep(1000);
  6755. }
  6756. if (adp_state != MEGASAS_HBA_OPERATIONAL) {
  6757. dev_info(&instance->pdev->dev,
  6758. "%s HBA failed to become operational, adp_state %d\n",
  6759. __func__, adp_state);
  6760. return 1;
  6761. }
  6762. return 0;
  6763. }
  6764. /**
6765. * megasas_detach_one - PCI hot-unplug entry point
  6766. * @pdev: PCI device structure
  6767. */
  6768. static void megasas_detach_one(struct pci_dev *pdev)
  6769. {
  6770. int i;
  6771. struct Scsi_Host *host;
  6772. struct megasas_instance *instance;
  6773. struct fusion_context *fusion;
  6774. size_t pd_seq_map_sz;
  6775. instance = pci_get_drvdata(pdev);
  6776. if (!instance)
  6777. return;
  6778. host = instance->host;
  6779. fusion = instance->ctrl_context;
  6780. /* Shutdown SR-IOV heartbeat timer */
  6781. if (instance->requestorId && !instance->skip_heartbeat_timer_del)
  6782. del_timer_sync(&instance->sriov_heartbeat_timer);
  6783. /* Stop the FW fault detection watchdog */
  6784. if (instance->adapter_type != MFI_SERIES)
  6785. megasas_fusion_stop_watchdog(instance);
  6786. if (instance->fw_crash_state != UNAVAILABLE)
  6787. megasas_free_host_crash_buffer(instance);
  6788. scsi_remove_host(instance->host);
  6789. instance->unload = 1;
  6790. if (megasas_wait_for_adapter_operational(instance))
  6791. goto skip_firing_dcmds;
  6792. megasas_flush_cache(instance);
  6793. megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
  6794. skip_firing_dcmds:
6795. /* cancel the delayed work if it is still queued */
  6796. if (instance->ev != NULL) {
  6797. struct megasas_aen_event *ev = instance->ev;
  6798. cancel_delayed_work_sync(&ev->hotplug_work);
  6799. instance->ev = NULL;
  6800. }
  6801. /* cancel all wait events */
  6802. wake_up_all(&instance->int_cmd_wait_q);
  6803. tasklet_kill(&instance->isr_tasklet);
  6804. /*
  6805. * Take the instance off the instance array. Note that we will not
6806. * decrement the max_index. We let this array be a sparse array
  6807. */
  6808. for (i = 0; i < megasas_mgmt_info.max_index; i++) {
  6809. if (megasas_mgmt_info.instance[i] == instance) {
  6810. megasas_mgmt_info.count--;
  6811. megasas_mgmt_info.instance[i] = NULL;
  6812. break;
  6813. }
  6814. }
  6815. instance->instancet->disable_intr(instance);
  6816. megasas_destroy_irqs(instance);
  6817. if (instance->msix_vectors)
  6818. pci_free_irq_vectors(instance->pdev);
  6819. if (instance->adapter_type >= VENTURA_SERIES) {
  6820. for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
  6821. kfree(fusion->stream_detect_by_ld[i]);
  6822. kfree(fusion->stream_detect_by_ld);
  6823. fusion->stream_detect_by_ld = NULL;
  6824. }
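/*
 * Free adapter-specific resources: RAID map, driver map and PD
 * sequence-sync buffers for fusion adapters, the legacy frame pool
 * for MFI-series adapters.
 */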
  6825. if (instance->adapter_type != MFI_SERIES) {
  6826. megasas_release_fusion(instance);
  6827. pd_seq_map_sz =
  6828. struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0,
  6829. seq, MAX_PHYSICAL_DEVICES);
  6830. for (i = 0; i < 2 ; i++) {
  6831. if (fusion->ld_map[i])
  6832. dma_free_coherent(&instance->pdev->dev,
  6833. fusion->max_map_sz,
  6834. fusion->ld_map[i],
  6835. fusion->ld_map_phys[i]);
  6836. if (fusion->ld_drv_map[i]) {
  6837. if (is_vmalloc_addr(fusion->ld_drv_map[i]))
  6838. vfree(fusion->ld_drv_map[i]);
  6839. else
  6840. free_pages((ulong)fusion->ld_drv_map[i],
  6841. fusion->drv_map_pages);
  6842. }
  6843. if (fusion->pd_seq_sync[i])
  6844. dma_free_coherent(&instance->pdev->dev,
  6845. pd_seq_map_sz,
  6846. fusion->pd_seq_sync[i],
  6847. fusion->pd_seq_phys[i]);
  6848. }
  6849. } else {
  6850. megasas_release_mfi(instance);
  6851. }
  6852. if (instance->vf_affiliation)
  6853. dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
  6854. sizeof(struct MR_LD_VF_AFFILIATION),
  6855. instance->vf_affiliation,
  6856. instance->vf_affiliation_h);
  6857. if (instance->vf_affiliation_111)
  6858. dma_free_coherent(&pdev->dev,
  6859. sizeof(struct MR_LD_VF_AFFILIATION_111),
  6860. instance->vf_affiliation_111,
  6861. instance->vf_affiliation_111_h);
  6862. if (instance->hb_host_mem)
  6863. dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
  6864. instance->hb_host_mem,
  6865. instance->hb_host_mem_h);
  6866. megasas_free_ctrl_dma_buffers(instance);
  6867. megasas_free_ctrl_mem(instance);
  6868. megasas_destroy_debugfs(instance);
  6869. scsi_host_put(host);
  6870. pci_disable_device(pdev);
  6871. }
  6872. /**
  6873. * megasas_shutdown - Shutdown entry point
  6874. * @pdev: PCI device structure
  6875. */
  6876. static void megasas_shutdown(struct pci_dev *pdev)
  6877. {
  6878. struct megasas_instance *instance = pci_get_drvdata(pdev);
  6879. if (!instance)
  6880. return;
  6881. instance->unload = 1;
  6882. if (megasas_wait_for_adapter_operational(instance))
  6883. goto skip_firing_dcmds;
  6884. megasas_flush_cache(instance);
  6885. megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
  6886. skip_firing_dcmds:
  6887. instance->instancet->disable_intr(instance);
  6888. megasas_destroy_irqs(instance);
  6889. if (instance->msix_vectors)
  6890. pci_free_irq_vectors(instance->pdev);
  6891. }
  6892. /*
  6893. * megasas_mgmt_open - char node "open" entry point
  6894. * @inode: char node inode
  6895. * @filep: char node file
  6896. */
  6897. static int megasas_mgmt_open(struct inode *inode, struct file *filep)
  6898. {
  6899. /*
  6900. * Allow only those users with admin rights
  6901. */
  6902. if (!capable(CAP_SYS_ADMIN))
  6903. return -EACCES;
  6904. return 0;
  6905. }
  6906. /*
  6907. * megasas_mgmt_fasync - Async notifier registration from applications
  6908. * @fd: char node file descriptor number
  6909. * @filep: char node file
  6910. * @mode: notifier on/off
  6911. *
6912. * This function adds the calling process to the driver's global async queue. When an
  6913. * event occurs, SIGIO will be sent to all processes in this queue.
  6914. */
  6915. static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
  6916. {
  6917. int rc;
  6918. mutex_lock(&megasas_async_queue_mutex);
  6919. rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
  6920. mutex_unlock(&megasas_async_queue_mutex);
  6921. if (rc >= 0) {
6922. /* Saved for a sanity check when we later get the AEN ioctl */
  6923. filep->private_data = filep;
  6924. return 0;
  6925. }
  6926. printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
  6927. return rc;
  6928. }
  6929. /*
  6930. * megasas_mgmt_poll - char node "poll" entry point
6931. * @file: char node file
  6932. * @wait: Events to poll for
  6933. */
  6934. static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
  6935. {
  6936. __poll_t mask;
  6937. unsigned long flags;
  6938. poll_wait(file, &megasas_poll_wait, wait);
  6939. spin_lock_irqsave(&poll_aen_lock, flags);
  6940. if (megasas_poll_wait_aen)
  6941. mask = (EPOLLIN | EPOLLRDNORM);
  6942. else
  6943. mask = 0;
  6944. megasas_poll_wait_aen = 0;
  6945. spin_unlock_irqrestore(&poll_aen_lock, flags);
  6946. return mask;
  6947. }
  6948. /*
  6949. * megasas_set_crash_dump_params_ioctl:
  6950. * Send CRASH_DUMP_MODE DCMD to all controllers
  6951. * @cmd: MFI command frame
  6952. */
  6953. static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
  6954. {
  6955. struct megasas_instance *local_instance;
  6956. int i, error = 0;
  6957. int crash_support;
  6958. crash_support = cmd->frame->dcmd.mbox.w[0];
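/*
 * Propagate the requested crash dump mode to every operational
 * controller that advertises driver crash dump support.
 */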
  6959. for (i = 0; i < megasas_mgmt_info.max_index; i++) {
  6960. local_instance = megasas_mgmt_info.instance[i];
  6961. if (local_instance && local_instance->crash_dump_drv_support) {
  6962. if ((atomic_read(&local_instance->adprecovery) ==
  6963. MEGASAS_HBA_OPERATIONAL) &&
  6964. !megasas_set_crash_dump_params(local_instance,
  6965. crash_support)) {
  6966. local_instance->crash_dump_app_support =
  6967. crash_support;
  6968. dev_info(&local_instance->pdev->dev,
  6969. "Application firmware crash "
  6970. "dump mode set success\n");
  6971. error = 0;
  6972. } else {
  6973. dev_info(&local_instance->pdev->dev,
  6974. "Application firmware crash "
  6975. "dump mode set failed\n");
  6976. error = -1;
  6977. }
  6978. }
  6979. }
  6980. return error;
  6981. }
  6982. /**
  6983. * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
  6984. * @instance: Adapter soft state
  6985. * @user_ioc: User's ioctl packet
  6986. * @ioc: ioctl packet
  6987. */
  6988. static int
  6989. megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
  6990. struct megasas_iocpacket __user * user_ioc,
  6991. struct megasas_iocpacket *ioc)
  6992. {
  6993. struct megasas_sge64 *kern_sge64 = NULL;
  6994. struct megasas_sge32 *kern_sge32 = NULL;
  6995. struct megasas_cmd *cmd;
  6996. void *kbuff_arr[MAX_IOCTL_SGE];
  6997. dma_addr_t buf_handle = 0;
  6998. int error = 0, i;
  6999. void *sense = NULL;
  7000. dma_addr_t sense_handle;
  7001. void *sense_ptr;
  7002. u32 opcode = 0;
  7003. int ret = DCMD_SUCCESS;
  7004. memset(kbuff_arr, 0, sizeof(kbuff_arr));
  7005. if (ioc->sge_count > MAX_IOCTL_SGE) {
  7006. dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
  7007. ioc->sge_count, MAX_IOCTL_SGE);
  7008. return -EINVAL;
  7009. }
  7010. if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
  7011. ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
  7012. !instance->support_nvme_passthru) ||
  7013. ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
  7014. !instance->support_pci_lane_margining)) {
  7015. dev_err(&instance->pdev->dev,
  7016. "Received invalid ioctl command 0x%x\n",
  7017. ioc->frame.hdr.cmd);
  7018. return -ENOTSUPP;
  7019. }
  7020. cmd = megasas_get_cmd(instance);
  7021. if (!cmd) {
  7022. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
  7023. return -ENOMEM;
  7024. }
  7025. /*
  7026. * User's IOCTL packet has 2 frames (maximum). Copy those two
  7027. * frames into our cmd's frames. cmd->frame's context will get
  7028. * overwritten when we copy from user's frames. So set that value
  7029. * alone separately
  7030. */
  7031. memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
  7032. cmd->frame->hdr.context = cpu_to_le32(cmd->index);
  7033. cmd->frame->hdr.pad_0 = 0;
  7034. cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
  7035. if (instance->consistent_mask_64bit)
  7036. cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
  7037. MFI_FRAME_SENSE64));
  7038. else
  7039. cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
  7040. MFI_FRAME_SENSE64));
  7041. if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
  7042. opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
  7043. if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
  7044. mutex_lock(&instance->reset_mutex);
  7045. if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
  7046. megasas_return_cmd(instance, cmd);
  7047. mutex_unlock(&instance->reset_mutex);
  7048. return -1;
  7049. }
  7050. mutex_unlock(&instance->reset_mutex);
  7051. }
  7052. if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
  7053. error = megasas_set_crash_dump_params_ioctl(cmd);
  7054. megasas_return_cmd(instance, cmd);
  7055. return error;
  7056. }
  7057. /*
  7058. * The management interface between applications and the fw uses
7059. * MFI frames. E.g., RAID configuration changes, LD property changes,
7060. * etc. are accomplished through different kinds of MFI frames. The
7061. * driver needs to care only about substituting user buffers with
7062. * kernel buffers in SGLs. The location of the SGL is embedded in the
  7063. * struct iocpacket itself.
  7064. */
  7065. if (instance->consistent_mask_64bit)
  7066. kern_sge64 = (struct megasas_sge64 *)
  7067. ((unsigned long)cmd->frame + ioc->sgl_off);
  7068. else
  7069. kern_sge32 = (struct megasas_sge32 *)
  7070. ((unsigned long)cmd->frame + ioc->sgl_off);
  7071. /*
  7072. * For each user buffer, create a mirror buffer and copy in
  7073. */
  7074. for (i = 0; i < ioc->sge_count; i++) {
  7075. if (!ioc->sgl[i].iov_len)
  7076. continue;
  7077. kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
  7078. ioc->sgl[i].iov_len,
  7079. &buf_handle, GFP_KERNEL);
  7080. if (!kbuff_arr[i]) {
  7081. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
  7082. "kernel SGL buffer for IOCTL\n");
  7083. error = -ENOMEM;
  7084. goto out;
  7085. }
  7086. /*
  7087. * We don't change the dma_coherent_mask, so
  7088. * dma_alloc_coherent only returns 32bit addresses
  7089. */
  7090. if (instance->consistent_mask_64bit) {
  7091. kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
  7092. kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
  7093. } else {
  7094. kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
  7095. kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
  7096. }
  7097. /*
  7098. * We created a kernel buffer corresponding to the
  7099. * user buffer. Now copy in from the user buffer
  7100. */
  7101. if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
  7102. (u32) (ioc->sgl[i].iov_len))) {
  7103. error = -EFAULT;
  7104. goto out;
  7105. }
  7106. }
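/*
 * If the application expects sense data, allocate a DMA buffer for it
 * and patch the buffer address into the frame at sense_off.
 */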
  7107. if (ioc->sense_len) {
  7108. /* make sure the pointer is part of the frame */
  7109. if (ioc->sense_off >
  7110. (sizeof(union megasas_frame) - sizeof(__le64))) {
  7111. error = -EINVAL;
  7112. goto out;
  7113. }
  7114. sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
  7115. &sense_handle, GFP_KERNEL);
  7116. if (!sense) {
  7117. error = -ENOMEM;
  7118. goto out;
  7119. }
  7120. /* always store 64 bits regardless of addressing */
  7121. sense_ptr = (void *)cmd->frame + ioc->sense_off;
  7122. put_unaligned_le64(sense_handle, sense_ptr);
  7123. }
  7124. /*
  7125. * Set the sync_cmd flag so that the ISR knows not to complete this
  7126. * cmd to the SCSI mid-layer
  7127. */
  7128. cmd->sync_cmd = 1;
  7129. ret = megasas_issue_blocked_cmd(instance, cmd, 0);
  7130. switch (ret) {
  7131. case DCMD_INIT:
  7132. case DCMD_BUSY:
  7133. cmd->sync_cmd = 0;
  7134. dev_err(&instance->pdev->dev,
  7135. "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
  7136. __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
  7137. cmd->cmd_status_drv);
  7138. error = -EBUSY;
  7139. goto out;
  7140. }
  7141. cmd->sync_cmd = 0;
  7142. if (instance->unload == 1) {
  7143. dev_info(&instance->pdev->dev, "Driver unload is in progress "
  7144. "don't submit data to application\n");
  7145. goto out;
  7146. }
  7147. /*
  7148. * copy out the kernel buffers to user buffers
  7149. */
  7150. for (i = 0; i < ioc->sge_count; i++) {
  7151. if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
  7152. ioc->sgl[i].iov_len)) {
  7153. error = -EFAULT;
  7154. goto out;
  7155. }
  7156. }
  7157. /*
  7158. * copy out the sense
  7159. */
  7160. if (ioc->sense_len) {
  7161. void __user *uptr;
  7162. /*
  7163. * sense_ptr points to the location that has the user
  7164. * sense buffer address
  7165. */
  7166. sense_ptr = (void *)ioc->frame.raw + ioc->sense_off;
  7167. if (in_compat_syscall())
  7168. uptr = compat_ptr(get_unaligned((compat_uptr_t *)
  7169. sense_ptr));
  7170. else
  7171. uptr = get_unaligned((void __user **)sense_ptr);
  7172. if (copy_to_user(uptr, sense, ioc->sense_len)) {
  7173. dev_err(&instance->pdev->dev, "Failed to copy out to user "
  7174. "sense data\n");
  7175. error = -EFAULT;
  7176. goto out;
  7177. }
  7178. }
  7179. /*
  7180. * copy the status codes returned by the fw
  7181. */
  7182. if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
  7183. &cmd->frame->hdr.cmd_status, sizeof(u8))) {
  7184. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
  7185. error = -EFAULT;
  7186. }
  7187. out:
  7188. if (sense) {
  7189. dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
  7190. sense, sense_handle);
  7191. }
  7192. for (i = 0; i < ioc->sge_count; i++) {
  7193. if (kbuff_arr[i]) {
  7194. if (instance->consistent_mask_64bit)
  7195. dma_free_coherent(&instance->pdev->dev,
  7196. le32_to_cpu(kern_sge64[i].length),
  7197. kbuff_arr[i],
  7198. le64_to_cpu(kern_sge64[i].phys_addr));
  7199. else
  7200. dma_free_coherent(&instance->pdev->dev,
  7201. le32_to_cpu(kern_sge32[i].length),
  7202. kbuff_arr[i],
  7203. le32_to_cpu(kern_sge32[i].phys_addr));
  7204. kbuff_arr[i] = NULL;
  7205. }
  7206. }
  7207. megasas_return_cmd(instance, cmd);
  7208. return error;
  7209. }
  7210. static struct megasas_iocpacket *
  7211. megasas_compat_iocpacket_get_user(void __user *arg)
  7212. {
  7213. struct megasas_iocpacket *ioc;
  7214. struct compat_megasas_iocpacket __user *cioc = arg;
  7215. size_t size;
  7216. int err = -EFAULT;
  7217. int i;
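/*
 * Copy the fixed part of the 32-bit compat iocpacket, then widen each
 * compat iovec pointer/length pair into the native SGL layout.
 */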
  7218. ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
  7219. if (!ioc)
  7220. return ERR_PTR(-ENOMEM);
  7221. size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame);
  7222. if (copy_from_user(ioc, arg, size))
  7223. goto out;
  7224. for (i = 0; i < MAX_IOCTL_SGE; i++) {
  7225. compat_uptr_t iov_base;
  7226. if (get_user(iov_base, &cioc->sgl[i].iov_base) ||
  7227. get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len))
  7228. goto out;
  7229. ioc->sgl[i].iov_base = compat_ptr(iov_base);
  7230. }
  7231. return ioc;
  7232. out:
  7233. kfree(ioc);
  7234. return ERR_PTR(err);
  7235. }
  7236. static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
  7237. {
  7238. struct megasas_iocpacket __user *user_ioc =
  7239. (struct megasas_iocpacket __user *)arg;
  7240. struct megasas_iocpacket *ioc;
  7241. struct megasas_instance *instance;
  7242. int error;
  7243. if (in_compat_syscall())
  7244. ioc = megasas_compat_iocpacket_get_user(user_ioc);
  7245. else
  7246. ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket));
  7247. if (IS_ERR(ioc))
  7248. return PTR_ERR(ioc);
  7249. instance = megasas_lookup_instance(ioc->host_no);
  7250. if (!instance) {
  7251. error = -ENODEV;
  7252. goto out_kfree_ioc;
  7253. }
  7254. /* Block ioctls in VF mode */
  7255. if (instance->requestorId && !allow_vf_ioctls) {
  7256. error = -ENODEV;
  7257. goto out_kfree_ioc;
  7258. }
  7259. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  7260. dev_err(&instance->pdev->dev, "Controller in crit error\n");
  7261. error = -ENODEV;
  7262. goto out_kfree_ioc;
  7263. }
  7264. if (instance->unload == 1) {
  7265. error = -ENODEV;
  7266. goto out_kfree_ioc;
  7267. }
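/*
 * ioctl_sem bounds the number of concurrent application ioctls; also
 * make sure the adapter is operational before issuing the command.
 */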
  7268. if (down_interruptible(&instance->ioctl_sem)) {
  7269. error = -ERESTARTSYS;
  7270. goto out_kfree_ioc;
  7271. }
  7272. if (megasas_wait_for_adapter_operational(instance)) {
  7273. error = -ENODEV;
  7274. goto out_up;
  7275. }
  7276. error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
  7277. out_up:
  7278. up(&instance->ioctl_sem);
  7279. out_kfree_ioc:
  7280. kfree(ioc);
  7281. return error;
  7282. }
  7283. static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
  7284. {
  7285. struct megasas_instance *instance;
  7286. struct megasas_aen aen;
  7287. int error;
  7288. if (file->private_data != file) {
  7289. printk(KERN_DEBUG "megasas: fasync_helper was not "
  7290. "called first\n");
  7291. return -EINVAL;
  7292. }
  7293. if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
  7294. return -EFAULT;
  7295. instance = megasas_lookup_instance(aen.host_no);
  7296. if (!instance)
  7297. return -ENODEV;
  7298. if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
  7299. return -ENODEV;
  7300. }
  7301. if (instance->unload == 1) {
  7302. return -ENODEV;
  7303. }
  7304. if (megasas_wait_for_adapter_operational(instance))
  7305. return -ENODEV;
  7306. mutex_lock(&instance->reset_mutex);
  7307. error = megasas_register_aen(instance, aen.seq_num,
  7308. aen.class_locale_word);
  7309. mutex_unlock(&instance->reset_mutex);
  7310. return error;
  7311. }
  7312. /**
  7313. * megasas_mgmt_ioctl - char node ioctl entry point
  7314. * @file: char device file pointer
  7315. * @cmd: ioctl command
  7316. * @arg: ioctl command arguments address
  7317. */
  7318. static long
  7319. megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  7320. {
  7321. switch (cmd) {
  7322. case MEGASAS_IOC_FIRMWARE:
  7323. return megasas_mgmt_ioctl_fw(file, arg);
  7324. case MEGASAS_IOC_GET_AEN:
  7325. return megasas_mgmt_ioctl_aen(file, arg);
  7326. }
  7327. return -ENOTTY;
  7328. }
  7329. #ifdef CONFIG_COMPAT
  7330. static long
  7331. megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
  7332. unsigned long arg)
  7333. {
  7334. switch (cmd) {
  7335. case MEGASAS_IOC_FIRMWARE32:
  7336. return megasas_mgmt_ioctl_fw(file, arg);
  7337. case MEGASAS_IOC_GET_AEN:
  7338. return megasas_mgmt_ioctl_aen(file, arg);
  7339. }
  7340. return -ENOTTY;
  7341. }
  7342. #endif
  7343. /*
  7344. * File operations structure for management interface
  7345. */
  7346. static const struct file_operations megasas_mgmt_fops = {
  7347. .owner = THIS_MODULE,
  7348. .open = megasas_mgmt_open,
  7349. .fasync = megasas_mgmt_fasync,
  7350. .unlocked_ioctl = megasas_mgmt_ioctl,
  7351. .poll = megasas_mgmt_poll,
  7352. #ifdef CONFIG_COMPAT
  7353. .compat_ioctl = megasas_mgmt_compat_ioctl,
  7354. #endif
  7355. .llseek = noop_llseek,
  7356. };
  7357. static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume);
  7358. /*
  7359. * PCI hotplug support registration structure
  7360. */
  7361. static struct pci_driver megasas_pci_driver = {
  7362. .name = "megaraid_sas",
  7363. .id_table = megasas_pci_table,
  7364. .probe = megasas_probe_one,
  7365. .remove = megasas_detach_one,
  7366. .driver.pm = &megasas_pm_ops,
  7367. .shutdown = megasas_shutdown,
  7368. };
  7369. /*
  7370. * Sysfs driver attributes
  7371. */
  7372. static ssize_t version_show(struct device_driver *dd, char *buf)
  7373. {
  7374. return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
  7375. MEGASAS_VERSION);
  7376. }
  7377. static DRIVER_ATTR_RO(version);
  7378. static ssize_t release_date_show(struct device_driver *dd, char *buf)
  7379. {
  7380. return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
  7381. MEGASAS_RELDATE);
  7382. }
  7383. static DRIVER_ATTR_RO(release_date);
  7384. static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
  7385. {
  7386. return sprintf(buf, "%u\n", support_poll_for_event);
  7387. }
  7388. static DRIVER_ATTR_RO(support_poll_for_event);
  7389. static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
  7390. {
  7391. return sprintf(buf, "%u\n", support_device_change);
  7392. }
  7393. static DRIVER_ATTR_RO(support_device_change);
  7394. static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
  7395. {
  7396. return sprintf(buf, "%u\n", megasas_dbg_lvl);
  7397. }
  7398. static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
  7399. size_t count)
  7400. {
  7401. int retval = count;
  7402. if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
  7403. printk(KERN_ERR "megasas: could not set dbg_lvl\n");
  7404. retval = -EINVAL;
  7405. }
  7406. return retval;
  7407. }
  7408. static DRIVER_ATTR_RW(dbg_lvl);
  7409. static ssize_t
  7410. support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
  7411. {
  7412. return sprintf(buf, "%u\n", support_nvme_encapsulation);
  7413. }
  7414. static DRIVER_ATTR_RO(support_nvme_encapsulation);
  7415. static ssize_t
  7416. support_pci_lane_margining_show(struct device_driver *dd, char *buf)
  7417. {
  7418. return sprintf(buf, "%u\n", support_pci_lane_margining);
  7419. }
  7420. static DRIVER_ATTR_RO(support_pci_lane_margining);
  7421. static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
  7422. {
  7423. sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
  7424. scsi_remove_device(sdev);
  7425. scsi_device_put(sdev);
  7426. }
  7427. /**
  7428. * megasas_update_device_list - Update the PD and LD device list from FW
  7429. * after an AEN event notification
  7430. * @instance: Adapter soft state
  7431. * @event_type: Indicates type of event (PD or LD event)
  7432. *
  7433. * @return: Success or failure
  7434. *
7435. * Issue DCMDs to the firmware to update the driver's internal device list.
7436. * Based on FW support, the driver sends the HOST_DEVICE_LIST or a combination
  7437. * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
  7438. */
  7439. static
  7440. int megasas_update_device_list(struct megasas_instance *instance,
  7441. int event_type)
  7442. {
  7443. int dcmd_ret;
  7444. if (instance->enable_fw_dev_list) {
  7445. return megasas_host_device_list_query(instance, false);
  7446. } else {
  7447. if (event_type & SCAN_PD_CHANNEL) {
  7448. dcmd_ret = megasas_get_pd_list(instance);
  7449. if (dcmd_ret != DCMD_SUCCESS)
  7450. return dcmd_ret;
  7451. }
  7452. if (event_type & SCAN_VD_CHANNEL) {
  7453. if (!instance->requestorId ||
  7454. megasas_get_ld_vf_affiliation(instance, 0)) {
  7455. return megasas_ld_list_query(instance,
  7456. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
  7457. }
  7458. }
  7459. }
  7460. return DCMD_SUCCESS;
  7461. }
  7462. /**
  7463. * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer
  7464. * after an AEN event notification
  7465. * @instance: Adapter soft state
  7466. * @scan_type: Indicates type of devices (PD/LD) to add
  7467. * @return void
  7468. */
  7469. static
  7470. void megasas_add_remove_devices(struct megasas_instance *instance,
  7471. int scan_type)
  7472. {
  7473. int i, j;
  7474. u16 pd_index = 0;
  7475. u16 ld_index = 0;
  7476. u16 channel = 0, id = 0;
  7477. struct Scsi_Host *host;
  7478. struct scsi_device *sdev1;
  7479. struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
  7480. struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
  7481. host = instance->host;
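/*
 * With FW-provided device lists, derive channel/id from each target ID:
 * system PDs land on the PD channels, logical drives on the VD channels.
 */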
  7482. if (instance->enable_fw_dev_list) {
  7483. targetid_list = instance->host_device_list_buf;
  7484. for (i = 0; i < targetid_list->count; i++) {
  7485. targetid_entry = &targetid_list->host_device_list[i];
  7486. if (targetid_entry->flags.u.bits.is_sys_pd) {
  7487. channel = le16_to_cpu(targetid_entry->target_id) /
  7488. MEGASAS_MAX_DEV_PER_CHANNEL;
  7489. id = le16_to_cpu(targetid_entry->target_id) %
  7490. MEGASAS_MAX_DEV_PER_CHANNEL;
  7491. } else {
  7492. channel = MEGASAS_MAX_PD_CHANNELS +
  7493. (le16_to_cpu(targetid_entry->target_id) /
  7494. MEGASAS_MAX_DEV_PER_CHANNEL);
  7495. id = le16_to_cpu(targetid_entry->target_id) %
  7496. MEGASAS_MAX_DEV_PER_CHANNEL;
  7497. }
  7498. sdev1 = scsi_device_lookup(host, channel, id, 0);
  7499. if (!sdev1) {
  7500. scsi_add_device(host, channel, id, 0);
  7501. } else {
  7502. scsi_device_put(sdev1);
  7503. }
  7504. }
  7505. }
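/*
 * Legacy path: walk every PD and LD slot, adding devices that are now
 * present and removing those that have disappeared.
 */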
  7506. if (scan_type & SCAN_PD_CHANNEL) {
  7507. for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
  7508. for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
  7509. pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
  7510. sdev1 = scsi_device_lookup(host, i, j, 0);
  7511. if (instance->pd_list[pd_index].driveState ==
  7512. MR_PD_STATE_SYSTEM) {
  7513. if (!sdev1)
  7514. scsi_add_device(host, i, j, 0);
  7515. else
  7516. scsi_device_put(sdev1);
  7517. } else {
  7518. if (sdev1)
  7519. megasas_remove_scsi_device(sdev1);
  7520. }
  7521. }
  7522. }
  7523. }
  7524. if (scan_type & SCAN_VD_CHANNEL) {
  7525. for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
  7526. for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
  7527. ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
  7528. sdev1 = scsi_device_lookup(host,
  7529. MEGASAS_MAX_PD_CHANNELS + i, j, 0);
  7530. if (instance->ld_ids[ld_index] != 0xff) {
  7531. if (!sdev1)
  7532. scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
  7533. else
  7534. scsi_device_put(sdev1);
  7535. } else {
  7536. if (sdev1)
  7537. megasas_remove_scsi_device(sdev1);
  7538. }
  7539. }
  7540. }
  7541. }
  7542. }
  7543. static void
  7544. megasas_aen_polling(struct work_struct *work)
  7545. {
  7546. struct megasas_aen_event *ev =
  7547. container_of(work, struct megasas_aen_event, hotplug_work.work);
  7548. struct megasas_instance *instance = ev->instance;
  7549. union megasas_evt_class_locale class_locale;
  7550. int event_type = 0;
  7551. u32 seq_num;
  7552. u16 ld_target_id;
  7553. int error;
  7554. u8 dcmd_ret = DCMD_SUCCESS;
  7555. struct scsi_device *sdev1;
  7556. if (!instance) {
  7557. printk(KERN_ERR "invalid instance!\n");
  7558. kfree(ev);
  7559. return;
  7560. }
  7561. /* Don't run the event workqueue thread if OCR is running */
  7562. mutex_lock(&instance->reset_mutex);
  7563. instance->ev = NULL;
  7564. if (instance->evt_detail) {
  7565. megasas_decode_evt(instance);
  7566. switch (le32_to_cpu(instance->evt_detail->code)) {
  7567. case MR_EVT_PD_INSERTED:
  7568. case MR_EVT_PD_REMOVED:
  7569. event_type = SCAN_PD_CHANNEL;
  7570. break;
  7571. case MR_EVT_LD_OFFLINE:
  7572. case MR_EVT_LD_DELETED:
  7573. ld_target_id = instance->evt_detail->args.ld.target_id;
  7574. sdev1 = scsi_device_lookup(instance->host,
  7575. MEGASAS_MAX_PD_CHANNELS +
  7576. (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
  7577. (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL),
  7578. 0);
  7579. if (sdev1)
  7580. megasas_remove_scsi_device(sdev1);
  7581. event_type = SCAN_VD_CHANNEL;
  7582. break;
  7583. case MR_EVT_LD_CREATED:
  7584. event_type = SCAN_VD_CHANNEL;
  7585. break;
  7586. case MR_EVT_CFG_CLEARED:
  7587. case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
  7588. case MR_EVT_FOREIGN_CFG_IMPORTED:
  7589. case MR_EVT_LD_STATE_CHANGE:
  7590. event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
  7591. dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
  7592. instance->host->host_no);
  7593. break;
  7594. case MR_EVT_CTRL_PROP_CHANGED:
  7595. dcmd_ret = megasas_get_ctrl_info(instance);
  7596. if (dcmd_ret == DCMD_SUCCESS &&
  7597. instance->snapdump_wait_time) {
  7598. megasas_get_snapdump_properties(instance);
  7599. dev_info(&instance->pdev->dev,
  7600. "Snap dump wait time\t: %d\n",
  7601. instance->snapdump_wait_time);
  7602. }
  7603. break;
  7604. default:
  7605. event_type = 0;
  7606. break;
  7607. }
  7608. } else {
  7609. dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
  7610. mutex_unlock(&instance->reset_mutex);
  7611. kfree(ev);
  7612. return;
  7613. }
  7614. if (event_type)
  7615. dcmd_ret = megasas_update_device_list(instance, event_type);
  7616. mutex_unlock(&instance->reset_mutex);
  7617. if (event_type && dcmd_ret == DCMD_SUCCESS)
  7618. megasas_add_remove_devices(instance, event_type);
  7619. if (dcmd_ret == DCMD_SUCCESS)
  7620. seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
  7621. else
  7622. seq_num = instance->last_seq_num;
  7623. /* Register AEN with FW for latest sequence number plus 1 */
  7624. class_locale.members.reserved = 0;
  7625. class_locale.members.locale = MR_EVT_LOCALE_ALL;
  7626. class_locale.members.class = MR_EVT_CLASS_DEBUG;
  7627. if (instance->aen_cmd != NULL) {
  7628. kfree(ev);
  7629. return;
  7630. }
  7631. mutex_lock(&instance->reset_mutex);
  7632. error = megasas_register_aen(instance, seq_num,
  7633. class_locale.word);
  7634. if (error)
  7635. dev_err(&instance->pdev->dev,
  7636. "register aen failed error %x\n", error);
  7637. mutex_unlock(&instance->reset_mutex);
  7638. kfree(ev);
  7639. }
  7640. /**
  7641. * megasas_init - Driver load entry point
  7642. */
  7643. static int __init megasas_init(void)
  7644. {
  7645. int rval;
  7646. /*
7647. * When booted in a kdump kernel, minimize the memory footprint by
7648. * disabling a few features
  7649. */
  7650. if (reset_devices) {
  7651. msix_vectors = 1;
  7652. rdpq_enable = 0;
  7653. dual_qdepth_disable = 1;
  7654. poll_queues = 0;
  7655. }
  7656. /*
  7657. * Announce driver version and other information
  7658. */
  7659. pr_info("megasas: %s\n", MEGASAS_VERSION);
  7660. megasas_dbg_lvl = 0;
  7661. support_poll_for_event = 2;
  7662. support_device_change = 1;
  7663. support_nvme_encapsulation = true;
  7664. support_pci_lane_margining = true;
  7665. memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
  7666. /*
  7667. * Register character device node
  7668. */
  7669. rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
  7670. if (rval < 0) {
  7671. printk(KERN_DEBUG "megasas: failed to open device node\n");
  7672. return rval;
  7673. }
  7674. megasas_mgmt_majorno = rval;
  7675. megasas_init_debugfs();
  7676. /*
  7677. * Register ourselves as PCI hotplug module
  7678. */
  7679. rval = pci_register_driver(&megasas_pci_driver);
  7680. if (rval) {
7681. printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
  7682. goto err_pcidrv;
  7683. }
  7684. if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
  7685. (event_log_level > MFI_EVT_CLASS_DEAD)) {
  7686. pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
  7687. event_log_level = MFI_EVT_CLASS_CRITICAL;
  7688. }
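/*
 * Expose the driver-level sysfs attributes; on any failure, unwind the
 * ones already created in reverse order.
 */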
  7689. rval = driver_create_file(&megasas_pci_driver.driver,
  7690. &driver_attr_version);
  7691. if (rval)
  7692. goto err_dcf_attr_ver;
  7693. rval = driver_create_file(&megasas_pci_driver.driver,
  7694. &driver_attr_release_date);
  7695. if (rval)
  7696. goto err_dcf_rel_date;
  7697. rval = driver_create_file(&megasas_pci_driver.driver,
  7698. &driver_attr_support_poll_for_event);
  7699. if (rval)
  7700. goto err_dcf_support_poll_for_event;
  7701. rval = driver_create_file(&megasas_pci_driver.driver,
  7702. &driver_attr_dbg_lvl);
  7703. if (rval)
  7704. goto err_dcf_dbg_lvl;
  7705. rval = driver_create_file(&megasas_pci_driver.driver,
  7706. &driver_attr_support_device_change);
  7707. if (rval)
  7708. goto err_dcf_support_device_change;
  7709. rval = driver_create_file(&megasas_pci_driver.driver,
  7710. &driver_attr_support_nvme_encapsulation);
  7711. if (rval)
  7712. goto err_dcf_support_nvme_encapsulation;
  7713. rval = driver_create_file(&megasas_pci_driver.driver,
  7714. &driver_attr_support_pci_lane_margining);
  7715. if (rval)
  7716. goto err_dcf_support_pci_lane_margining;
  7717. return rval;
  7718. err_dcf_support_pci_lane_margining:
  7719. driver_remove_file(&megasas_pci_driver.driver,
  7720. &driver_attr_support_nvme_encapsulation);
  7721. err_dcf_support_nvme_encapsulation:
  7722. driver_remove_file(&megasas_pci_driver.driver,
  7723. &driver_attr_support_device_change);
  7724. err_dcf_support_device_change:
  7725. driver_remove_file(&megasas_pci_driver.driver,
  7726. &driver_attr_dbg_lvl);
  7727. err_dcf_dbg_lvl:
  7728. driver_remove_file(&megasas_pci_driver.driver,
  7729. &driver_attr_support_poll_for_event);
  7730. err_dcf_support_poll_for_event:
  7731. driver_remove_file(&megasas_pci_driver.driver,
  7732. &driver_attr_release_date);
  7733. err_dcf_rel_date:
  7734. driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
  7735. err_dcf_attr_ver:
  7736. pci_unregister_driver(&megasas_pci_driver);
  7737. err_pcidrv:
  7738. megasas_exit_debugfs();
  7739. unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
  7740. return rval;
  7741. }
  7742. /**
  7743. * megasas_exit - Driver unload entry point
  7744. */
  7745. static void __exit megasas_exit(void)
  7746. {
  7747. driver_remove_file(&megasas_pci_driver.driver,
  7748. &driver_attr_dbg_lvl);
  7749. driver_remove_file(&megasas_pci_driver.driver,
  7750. &driver_attr_support_poll_for_event);
  7751. driver_remove_file(&megasas_pci_driver.driver,
  7752. &driver_attr_support_device_change);
  7753. driver_remove_file(&megasas_pci_driver.driver,
  7754. &driver_attr_release_date);
  7755. driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
  7756. driver_remove_file(&megasas_pci_driver.driver,
  7757. &driver_attr_support_nvme_encapsulation);
  7758. driver_remove_file(&megasas_pci_driver.driver,
  7759. &driver_attr_support_pci_lane_margining);
  7760. pci_unregister_driver(&megasas_pci_driver);
  7761. megasas_exit_debugfs();
  7762. unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
  7763. }
  7764. module_init(megasas_init);
  7765. module_exit(megasas_exit);