mpi3mr_os.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2022 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");

static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");
/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)
#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)
/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);
	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
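/*
 * Illustration only (not part of the driver): blk_mq_unique_tag() packs the
 * hardware queue index into the upper bits and the per-queue tag into the
 * low BLK_MQ_UNIQUE_TAG_BITS, which is what lets the helpers above and
 * below split it apart again. A minimal sketch of that round trip,
 * assuming the standard blk-mq encoding:
 */
#ifdef MPI3MR_DOC_SKETCH /* never defined; documentation sketch only */
static u32 mpi3mr_sketch_pack_unique_tag(u16 hw_queue, u16 tag)
{
	/* mirrors blk_mq_unique_tag(): hwq in the high half, tag in the low */
	return ((u32)hw_queue << BLK_MQ_UNIQUE_TAG_BITS) |
		(tag & BLK_MQ_UNIQUE_TAG_MASK);
}
#endif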
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}
/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * is not in LLD scope anymore.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}
/**
 * mpi3mr_fwevt_get - kref increment helper
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrement helper
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}
/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
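/*
 * Reference-count contract for a firmware event, as implemented by the
 * helpers above and below (summary added for clarity, not upstream text):
 *
 *   mpi3mr_alloc_fwevt()          - kref_init(), count = 1
 *   mpi3mr_fwevt_add_to_list()    - +1 for fwevt_list, +1 for the work queue
 *   mpi3mr_dequeue_fwevt() /
 *   mpi3mr_fwevt_del_from_list()  - -1 when taken off fwevt_list
 *   worker / mpi3mr_cancel_work() - -1 for the work-queue reference, then
 *                                   -1 to drop the kref_init() reference
 *
 * The event is freed by mpi3mr_fwevt_free() once the count reaches zero.
 */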
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}
/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}
/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}
/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset was invoked as part of processing
		 * that same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we would deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}
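/*
 * Note (added for clarity): setting fwevt->discard above does not free the
 * event; it flags the in-flight work so that the processing path drops the
 * event once the worker or the pending-at-SML call returns, avoiding the
 * cancel_work_sync() deadlock described in the comment.
 */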
/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread, the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through device info change event
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
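/*
 * Worked example for the modified_qd computation above (illustrative
 * numbers): qd_reduction is expressed in tenths of the firmware QD, so
 * with fw_qd = 128 and qd_reduction = 3 the new depth is
 * max(128 * 3 / 10, 8) = max(38, 8) = 38; the floor of 8 keeps a very
 * small firmware QD from being throttled down to an unusable depth.
 */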
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset, prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}
/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}
/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}
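/*
 * Note (added for clarity): writing DID_RESET << 16 places DID_RESET in
 * the host byte of scmd->result, which tells the SCSI midlayer that the
 * command was terminated by a reset and is a candidate for retry.
 */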
/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device (LUN) then the device specific pending I/O
 * counter is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}
/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then the target specific pending I/O counter
 * is updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}
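/*
 * Sketch of how the two counting iterators above are typically driven
 * (illustrative only; the driver invokes them from its reset handlers):
 */
#ifdef MPI3MR_DOC_SKETCH /* never defined; documentation sketch only */
static u32 mpi3mr_sketch_pending_on_sdev(struct scsi_device *sdev)
{
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;

	sdev_priv_data->pend_count = 0;
	blk_mq_tagset_busy_iter(&sdev->host->tag_set,
	    mpi3mr_count_dev_pending, (void *)sdev);
	return sdev_priv_data->pend_count;
}
#endif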
/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}
/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}
/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;

	kref_init(&tgtdev->ref_count);
	return tgtdev;
}
/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (!list_empty(&tgtdev->list)) {
		list_del_init(&tgtdev->list);
		mpi3mr_tgtdev_put(tgtdev);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}
/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}
/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
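/*
 * Typical caller pattern for the lookup accessors above (illustrative):
 * every successful lookup returns with an extra reference that the caller
 * must drop with mpi3mr_tgtdev_put() once it is done with the device.
 */
#ifdef MPI3MR_DOC_SKETCH /* never defined; documentation sketch only */
static void mpi3mr_sketch_lookup_usage(struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	if (!tgtdev)
		return;
	/* ... use tgtdev fields under the returned reference ... */
	mpi3mr_tgtdev_put(tgtdev);
}
#endif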
/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}
/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set the io_divert flag for each device associated
 * with the given throttle group to the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is, then removes the device from the upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		atomic_set(&tgt_priv->block_io, 0);
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		if (tgtdev->starget) {
			if (mrioc->current_event)
				mrioc->current_event->pending_at_sml = 1;
			scsi_remove_target(&tgtdev->starget->dev);
			tgtdev->host_exposed = 0;
			if (mrioc->current_event) {
				mrioc->current_event->pending_at_sml = 0;
				if (mrioc->current_event->discard) {
					mpi3mr_print_device_event_notice(mrioc,
					    false);
					return;
				}
			}
		}
	} else
		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);

	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and, if it is not already exposed, exposes the device to the
 * upper layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}
/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}
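/*
 * Worked example for the clamping above (illustrative numbers): with
 * shost->can_queue = 1024, a requested q_depth of 4096 is clamped to 1024,
 * a q_depth of 0 falls back to MPI3MR_DEFAULT_SDEV_QD, and an untagged
 * device is always pinned to a depth of 1.
 */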
/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* The block layer hw sector size = 512 */
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}
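/*
 * Worked example for the NVMe limits above (illustrative numbers): an MDTS
 * of 1 MiB gives blk_queue_max_hw_sectors(q, 1048576 / 512) = 2048 sectors,
 * and a reported page-size exponent of 12 gives a virt boundary mask of
 * (1 << 12) - 1 = 0xfff, i.e. scatter/gather elements must not straddle a
 * 4 KiB boundary, matching the NVMe PRP constraint.
 */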
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden && !tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
}
/**
 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
  1034. case MPI3_DEVICE_DEVFORM_PCIE:
  1035. {
  1036. struct mpi3_device0_pcie_format *pcieinf =
  1037. &dev_pg0->device_specific.pcie_format;
  1038. u16 dev_info = le16_to_cpu(pcieinf->device_info);
  1039. tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
  1040. tgtdev->dev_spec.pcie_inf.capb =
  1041. le32_to_cpu(pcieinf->capabilities);
  1042. tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
  1043. /* 2^12 = 4096 */
  1044. tgtdev->dev_spec.pcie_inf.pgsz = 12;
  1045. if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
  1046. tgtdev->dev_spec.pcie_inf.mdts =
  1047. le32_to_cpu(pcieinf->maximum_data_transfer_size);
  1048. tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
  1049. tgtdev->dev_spec.pcie_inf.reset_to =
  1050. max_t(u8, pcieinf->controller_reset_to,
  1051. MPI3MR_INTADMCMD_TIMEOUT);
  1052. tgtdev->dev_spec.pcie_inf.abort_to =
  1053. max_t(u8, pcieinf->nvme_abort_to,
  1054. MPI3MR_INTADMCMD_TIMEOUT);
  1055. }
  1056. if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
  1057. tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
  1058. if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
  1059. MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
  1060. ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
  1061. MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
  1062. tgtdev->is_hidden = 1;
  1063. tgtdev->non_stl = 1;
  1064. if (!mrioc->shost)
  1065. break;
  1066. prot_mask = scsi_host_get_prot(mrioc->shost);
  1067. if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
  1068. scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
  1069. ioc_info(mrioc,
  1070. "%s : Disabling DIX0 prot capability\n", __func__);
  1071. ioc_info(mrioc,
  1072. "because HBA does not support DIX0 operation on NVME drives\n");
  1073. }
  1074. break;
  1075. }
  1076. case MPI3_DEVICE_DEVFORM_VD:
  1077. {
  1078. struct mpi3_device0_vd_format *vdinf =
  1079. &dev_pg0->device_specific.vd_format;
  1080. struct mpi3mr_throttle_group_info *tg = NULL;
  1081. u16 vdinf_io_throttle_group =
  1082. le16_to_cpu(vdinf->io_throttle_group);
  1083. tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
  1084. if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
  1085. tgtdev->is_hidden = 1;
  1086. tgtdev->non_stl = 1;
  1087. tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
  1088. tgtdev->dev_spec.vd_inf.tg_high =
  1089. le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
  1090. tgtdev->dev_spec.vd_inf.tg_low =
  1091. le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
  1092. if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
  1093. tg = mrioc->throttle_groups + vdinf_io_throttle_group;
  1094. tg->id = vdinf_io_throttle_group;
  1095. tg->high = tgtdev->dev_spec.vd_inf.tg_high;
  1096. tg->low = tgtdev->dev_spec.vd_inf.tg_low;
  1097. tg->qd_reduction =
  1098. tgtdev->dev_spec.vd_inf.tg_qd_reduction;
  1099. if (is_added == true)
  1100. tg->fw_qd = tgtdev->q_depth;
  1101. tg->modified_qd = tgtdev->q_depth;
  1102. }
  1103. tgtdev->dev_spec.vd_inf.tg = tg;
  1104. if (scsi_tgt_priv_data)
  1105. scsi_tgt_priv_data->throttle_group = tg;
  1106. break;
  1107. }
  1108. default:
  1109. break;
  1110. }
  1111. }
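/*
 * Illustrative sketch (editorial, not part of the driver): the device
 * page0 flags word above is decoded with plain mask tests, and the PCIe
 * MDTS value is clamped to 1 MB, e.g.:
 *
 *	bool hidden    = flags & MPI3_DEVICE0_FLAGS_HIDDEN;
 *	bool throttled = flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED;
 *	u32 mdts = min_t(u32,
 *			 le32_to_cpu(pcieinf->maximum_data_transfer_size),
 *			 1024 * 1024);
 */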
  1112. /**
  1113. * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
  1114. * @mrioc: Adapter instance reference
  1115. * @fwevt: Firmware event information.
  1116. *
1117. * Processes the Device Status Change event and based on the device's
1118. * new information either exposes the device to the upper layers or
1119. * removes it from the upper layers.
  1120. *
  1121. * Return: Nothing.
  1122. */
  1123. static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
  1124. struct mpi3mr_fwevt *fwevt)
  1125. {
  1126. u16 dev_handle = 0;
  1127. u8 uhide = 0, delete = 0, cleanup = 0;
  1128. struct mpi3mr_tgt_dev *tgtdev = NULL;
  1129. struct mpi3_event_data_device_status_change *evtdata =
  1130. (struct mpi3_event_data_device_status_change *)fwevt->event_data;
  1131. dev_handle = le16_to_cpu(evtdata->dev_handle);
  1132. ioc_info(mrioc,
  1133. "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
  1134. __func__, dev_handle, evtdata->reason_code);
  1135. switch (evtdata->reason_code) {
  1136. case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
  1137. delete = 1;
  1138. break;
  1139. case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
  1140. uhide = 1;
  1141. break;
  1142. case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
  1143. delete = 1;
  1144. cleanup = 1;
  1145. break;
  1146. default:
  1147. ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
  1148. evtdata->reason_code);
  1149. break;
  1150. }
  1151. tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
  1152. if (!tgtdev)
  1153. goto out;
  1154. if (uhide) {
  1155. tgtdev->is_hidden = 0;
  1156. if (!tgtdev->host_exposed)
  1157. mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
  1158. }
  1159. if (tgtdev->starget && tgtdev->starget->hostdata) {
  1160. if (delete)
  1161. mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
  1162. }
  1163. if (cleanup) {
  1164. mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
  1165. mpi3mr_tgtdev_put(tgtdev);
  1166. }
  1167. out:
  1168. if (tgtdev)
  1169. mpi3mr_tgtdev_put(tgtdev);
  1170. }
  1171. /**
  1172. * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
  1173. * @mrioc: Adapter instance reference
  1174. * @dev_pg0: New device page0
  1175. *
1176. * Processes the Device Info Change event and based on the device's
1177. * new information either exposes the device to the upper layers,
1178. * removes it from the upper layers, or updates the details of the
1179. * device.
  1180. *
  1181. * Return: Nothing.
  1182. */
  1183. static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
  1184. struct mpi3_device_page0 *dev_pg0)
  1185. {
  1186. struct mpi3mr_tgt_dev *tgtdev = NULL;
  1187. u16 dev_handle = 0, perst_id = 0;
  1188. perst_id = le16_to_cpu(dev_pg0->persistent_id);
  1189. dev_handle = le16_to_cpu(dev_pg0->dev_handle);
  1190. ioc_info(mrioc,
  1191. "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
  1192. __func__, dev_handle, perst_id);
  1193. tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
  1194. if (!tgtdev)
  1195. goto out;
  1196. mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
  1197. if (!tgtdev->is_hidden && !tgtdev->host_exposed)
  1198. mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
  1199. if (tgtdev->is_hidden && tgtdev->host_exposed)
  1200. mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
  1201. if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
  1202. starget_for_each_device(tgtdev->starget, (void *)tgtdev,
  1203. mpi3mr_update_sdev);
  1204. out:
  1205. if (tgtdev)
  1206. mpi3mr_tgtdev_put(tgtdev);
  1207. }
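/*
 * Editorial summary of the expose/remove decision above:
 *
 *	is_hidden  host_exposed  action
 *	    0           0        report the device to the host
 *	    1           1        remove the device from the host
 *	    0           1        update the existing SCSI devices
 */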
  1208. /**
  1209. * mpi3mr_free_enclosure_list - release enclosures
  1210. * @mrioc: Adapter instance reference
  1211. *
1212. * Free memory allocated during enclosure add.
1213. *
1214. * Return: Nothing.
  1215. */
  1216. void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
  1217. {
  1218. struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;
  1219. list_for_each_entry_safe(enclosure_dev,
  1220. enclosure_dev_next, &mrioc->enclosure_list, list) {
  1221. list_del(&enclosure_dev->list);
  1222. kfree(enclosure_dev);
  1223. }
  1224. }
  1225. /**
  1226. * mpi3mr_enclosure_find_by_handle - enclosure search by handle
  1227. * @mrioc: Adapter instance reference
  1228. * @handle: Firmware device handle of the enclosure
  1229. *
1230. * This searches for an enclosure device based on handle and returns
1231. * the enclosure object.
  1232. *
  1233. * Return: Enclosure object reference or NULL
  1234. */
  1235. struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
  1236. struct mpi3mr_ioc *mrioc, u16 handle)
  1237. {
  1238. struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
  1239. list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
  1240. if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
  1241. continue;
  1242. r = enclosure_dev;
  1243. goto out;
  1244. }
  1245. out:
  1246. return r;
  1247. }
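/*
 * Typical lookup usage (illustrative sketch): resolve an enclosure
 * handle from device page0 and pick up its logical id, as done in
 * mpi3mr_update_tgtdev():
 *
 *	enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, encl_handle);
 *	if (enclosure_dev)
 *		logical_id = le64_to_cpu(
 *			enclosure_dev->pg0.enclosure_logical_id);
 */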
  1248. /**
  1249. * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
  1250. * @mrioc: Adapter instance reference
  1251. * @encl_pg0: Enclosure page 0.
  1252. * @is_added: Added event or not
  1253. *
1254. * Return: Nothing.
  1255. */
  1256. static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
  1257. struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
  1258. {
  1259. char *reason_str = NULL;
  1260. if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
  1261. return;
  1262. if (is_added)
  1263. reason_str = "enclosure added";
  1264. else
  1265. reason_str = "enclosure dev status changed";
  1266. ioc_info(mrioc,
  1267. "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
  1268. reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
  1269. (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
  1270. ioc_info(mrioc,
  1271. "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
  1272. le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
  1273. le16_to_cpu(encl_pg0->flags),
  1274. ((le16_to_cpu(encl_pg0->flags) &
  1275. MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
  1276. }
  1277. /**
  1278. * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
  1279. * @mrioc: Adapter instance reference
  1280. * @fwevt: Firmware event reference
  1281. *
  1282. * Prints information about the Enclosure device status or
1283. * Enclosure add events if logging is enabled and adds or removes
  1284. * the enclosure from the controller's internal list of
  1285. * enclosures.
  1286. *
  1287. * Return: Nothing.
  1288. */
  1289. static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
  1290. struct mpi3mr_fwevt *fwevt)
  1291. {
  1292. struct mpi3mr_enclosure_node *enclosure_dev = NULL;
  1293. struct mpi3_enclosure_page0 *encl_pg0;
  1294. u16 encl_handle;
  1295. u8 added, present;
  1296. encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
  1297. added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
  1298. mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
  1299. encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
  1300. present = ((le16_to_cpu(encl_pg0->flags) &
  1301. MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
  1302. if (encl_handle)
  1303. enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
  1304. encl_handle);
  1305. if (!enclosure_dev && present) {
  1306. enclosure_dev =
  1307. kzalloc(sizeof(struct mpi3mr_enclosure_node),
  1308. GFP_KERNEL);
  1309. if (!enclosure_dev)
  1310. return;
  1311. list_add_tail(&enclosure_dev->list,
  1312. &mrioc->enclosure_list);
  1313. }
  1314. if (enclosure_dev) {
  1315. if (!present) {
  1316. list_del(&enclosure_dev->list);
  1317. kfree(enclosure_dev);
  1318. } else
  1319. memcpy(&enclosure_dev->pg0, encl_pg0,
  1320. sizeof(enclosure_dev->pg0));
  1321. }
  1322. }
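/*
 * Editorial note: the enclosure list is reconciled purely from the
 * "present" bit above - a present enclosure that is not yet tracked is
 * allocated and added, an absent one that is tracked is deleted and
 * freed, and a tracked, present one just has its cached page0 refreshed.
 */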
  1323. /**
  1324. * mpi3mr_sastopochg_evt_debug - SASTopoChange details
  1325. * @mrioc: Adapter instance reference
  1326. * @event_data: SAS topology change list event data
  1327. *
  1328. * Prints information about the SAS topology change event.
  1329. *
  1330. * Return: Nothing.
  1331. */
  1332. static void
  1333. mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
  1334. struct mpi3_event_data_sas_topology_change_list *event_data)
  1335. {
  1336. int i;
  1337. u16 handle;
  1338. u8 reason_code, phy_number;
  1339. char *status_str = NULL;
  1340. u8 link_rate, prev_link_rate;
  1341. switch (event_data->exp_status) {
  1342. case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
  1343. status_str = "remove";
  1344. break;
  1345. case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
  1346. status_str = "responding";
  1347. break;
  1348. case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
  1349. status_str = "remove delay";
  1350. break;
  1351. case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
  1352. status_str = "direct attached";
  1353. break;
  1354. default:
  1355. status_str = "unknown status";
  1356. break;
  1357. }
  1358. ioc_info(mrioc, "%s :sas topology change: (%s)\n",
  1359. __func__, status_str);
  1360. ioc_info(mrioc,
  1361. "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
  1362. __func__, le16_to_cpu(event_data->expander_dev_handle),
  1363. event_data->io_unit_port,
  1364. le16_to_cpu(event_data->enclosure_handle),
  1365. event_data->start_phy_num, event_data->num_entries);
  1366. for (i = 0; i < event_data->num_entries; i++) {
  1367. handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
  1368. if (!handle)
  1369. continue;
  1370. phy_number = event_data->start_phy_num + i;
  1371. reason_code = event_data->phy_entry[i].status &
  1372. MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
  1373. switch (reason_code) {
  1374. case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
  1375. status_str = "target remove";
  1376. break;
  1377. case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
  1378. status_str = "delay target remove";
  1379. break;
  1380. case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
  1381. status_str = "link status change";
  1382. break;
  1383. case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
  1384. status_str = "link status no change";
  1385. break;
  1386. case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
  1387. status_str = "target responding";
  1388. break;
  1389. default:
  1390. status_str = "unknown";
  1391. break;
  1392. }
  1393. link_rate = event_data->phy_entry[i].link_rate >> 4;
  1394. prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
  1395. ioc_info(mrioc,
  1396. "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
  1397. __func__, phy_number, handle, status_str, link_rate,
  1398. prev_link_rate);
  1399. }
  1400. }
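/*
 * Illustrative sketch (editorial): each phy entry packs two link rates
 * into one byte - the new rate in the upper nibble and the previous
 * rate in the lower nibble:
 *
 *	u8 new_rate  = phy_entry->link_rate >> 4;
 *	u8 prev_rate = phy_entry->link_rate & 0xF;
 */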
  1401. /**
  1402. * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
  1403. * @mrioc: Adapter instance reference
  1404. * @fwevt: Firmware event reference
  1405. *
  1406. * Prints information about the SAS topology change event and
  1407. * for "not responding" event code, removes the device from the
  1408. * upper layers.
  1409. *
  1410. * Return: Nothing.
  1411. */
  1412. static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
  1413. struct mpi3mr_fwevt *fwevt)
  1414. {
  1415. struct mpi3_event_data_sas_topology_change_list *event_data =
  1416. (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
  1417. int i;
  1418. u16 handle;
  1419. u8 reason_code;
  1420. u64 exp_sas_address = 0, parent_sas_address = 0;
  1421. struct mpi3mr_hba_port *hba_port = NULL;
  1422. struct mpi3mr_tgt_dev *tgtdev = NULL;
  1423. struct mpi3mr_sas_node *sas_expander = NULL;
  1424. unsigned long flags;
  1425. u8 link_rate, prev_link_rate, parent_phy_number;
  1426. mpi3mr_sastopochg_evt_debug(mrioc, event_data);
  1427. if (mrioc->sas_transport_enabled) {
  1428. hba_port = mpi3mr_get_hba_port_by_id(mrioc,
  1429. event_data->io_unit_port);
  1430. if (le16_to_cpu(event_data->expander_dev_handle)) {
  1431. spin_lock_irqsave(&mrioc->sas_node_lock, flags);
  1432. sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
  1433. le16_to_cpu(event_data->expander_dev_handle));
  1434. if (sas_expander) {
  1435. exp_sas_address = sas_expander->sas_address;
  1436. hba_port = sas_expander->hba_port;
  1437. }
  1438. spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
  1439. parent_sas_address = exp_sas_address;
  1440. } else
  1441. parent_sas_address = mrioc->sas_hba.sas_address;
  1442. }
  1443. for (i = 0; i < event_data->num_entries; i++) {
  1444. if (fwevt->discard)
  1445. return;
  1446. handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
  1447. if (!handle)
  1448. continue;
  1449. tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
  1450. if (!tgtdev)
  1451. continue;
  1452. reason_code = event_data->phy_entry[i].status &
  1453. MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
  1454. switch (reason_code) {
  1455. case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
  1456. if (tgtdev->host_exposed)
  1457. mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
  1458. mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
  1459. mpi3mr_tgtdev_put(tgtdev);
  1460. break;
  1461. case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
  1462. case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
  1463. case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
  1464. {
  1465. if (!mrioc->sas_transport_enabled || tgtdev->non_stl
  1466. || tgtdev->is_hidden)
  1467. break;
  1468. link_rate = event_data->phy_entry[i].link_rate >> 4;
  1469. prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
  1470. if (link_rate == prev_link_rate)
  1471. break;
  1472. if (!parent_sas_address)
  1473. break;
  1474. parent_phy_number = event_data->start_phy_num + i;
  1475. mpi3mr_update_links(mrioc, parent_sas_address, handle,
  1476. parent_phy_number, link_rate, hba_port);
  1477. break;
  1478. }
  1479. default:
  1480. break;
  1481. }
  1482. if (tgtdev)
  1483. mpi3mr_tgtdev_put(tgtdev);
  1484. }
  1485. if (mrioc->sas_transport_enabled && (event_data->exp_status ==
  1486. MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
  1487. if (sas_expander)
  1488. mpi3mr_expander_remove(mrioc, exp_sas_address,
  1489. hba_port);
  1490. }
  1491. }
  1492. /**
  1493. * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
  1494. * @mrioc: Adapter instance reference
  1495. * @event_data: PCIe topology change list event data
  1496. *
  1497. * Prints information about the PCIe topology change event.
  1498. *
  1499. * Return: Nothing.
  1500. */
  1501. static void
  1502. mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
  1503. struct mpi3_event_data_pcie_topology_change_list *event_data)
  1504. {
  1505. int i;
  1506. u16 handle;
  1507. u16 reason_code;
  1508. u8 port_number;
  1509. char *status_str = NULL;
  1510. u8 link_rate, prev_link_rate;
  1511. switch (event_data->switch_status) {
  1512. case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
  1513. status_str = "remove";
  1514. break;
  1515. case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
  1516. status_str = "responding";
  1517. break;
  1518. case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
  1519. status_str = "remove delay";
  1520. break;
  1521. case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
  1522. status_str = "direct attached";
  1523. break;
  1524. default:
  1525. status_str = "unknown status";
  1526. break;
  1527. }
  1528. ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
  1529. __func__, status_str);
  1530. ioc_info(mrioc,
  1531. "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
  1532. __func__, le16_to_cpu(event_data->switch_dev_handle),
  1533. le16_to_cpu(event_data->enclosure_handle),
  1534. event_data->start_port_num, event_data->num_entries);
  1535. for (i = 0; i < event_data->num_entries; i++) {
  1536. handle =
  1537. le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
  1538. if (!handle)
  1539. continue;
  1540. port_number = event_data->start_port_num + i;
  1541. reason_code = event_data->port_entry[i].port_status;
  1542. switch (reason_code) {
  1543. case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
  1544. status_str = "target remove";
  1545. break;
  1546. case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
  1547. status_str = "delay target remove";
  1548. break;
  1549. case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
  1550. status_str = "link status change";
  1551. break;
  1552. case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
  1553. status_str = "link status no change";
  1554. break;
  1555. case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
  1556. status_str = "target responding";
  1557. break;
  1558. default:
  1559. status_str = "unknown";
  1560. break;
  1561. }
  1562. link_rate = event_data->port_entry[i].current_port_info &
  1563. MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
  1564. prev_link_rate = event_data->port_entry[i].previous_port_info &
  1565. MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
  1566. ioc_info(mrioc,
  1567. "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
  1568. __func__, port_number, handle, status_str, link_rate,
  1569. prev_link_rate);
  1570. }
  1571. }
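/*
 * Illustrative sketch (editorial): unlike the SAS event, the PCIe entry
 * keeps the new and previous rates in two separate fields, each masked
 * with MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK:
 *
 *	u8 new_rate  = port_entry->current_port_info &
 *		       MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
 *	u8 prev_rate = port_entry->previous_port_info &
 *		       MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
 */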
  1572. /**
  1573. * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
  1574. * @mrioc: Adapter instance reference
  1575. * @fwevt: Firmware event reference
  1576. *
  1577. * Prints information about the PCIe topology change event and
  1578. * for "not responding" event code, removes the device from the
  1579. * upper layers.
  1580. *
  1581. * Return: Nothing.
  1582. */
  1583. static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
  1584. struct mpi3mr_fwevt *fwevt)
  1585. {
  1586. struct mpi3_event_data_pcie_topology_change_list *event_data =
  1587. (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
  1588. int i;
  1589. u16 handle;
  1590. u8 reason_code;
  1591. struct mpi3mr_tgt_dev *tgtdev = NULL;
  1592. mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
  1593. for (i = 0; i < event_data->num_entries; i++) {
  1594. if (fwevt->discard)
  1595. return;
  1596. handle =
  1597. le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
  1598. if (!handle)
  1599. continue;
  1600. tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
  1601. if (!tgtdev)
  1602. continue;
  1603. reason_code = event_data->port_entry[i].port_status;
  1604. switch (reason_code) {
  1605. case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
  1606. if (tgtdev->host_exposed)
  1607. mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
  1608. mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
  1609. mpi3mr_tgtdev_put(tgtdev);
  1610. break;
  1611. default:
  1612. break;
  1613. }
  1614. if (tgtdev)
  1615. mpi3mr_tgtdev_put(tgtdev);
  1616. }
  1617. }
  1618. /**
  1619. * mpi3mr_logdata_evt_bh - Log data event bottomhalf
  1620. * @mrioc: Adapter instance reference
  1621. * @fwevt: Firmware event reference
  1622. *
  1623. * Extracts the event data and calls application interfacing
  1624. * function to process the event further.
  1625. *
  1626. * Return: Nothing.
  1627. */
  1628. static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
  1629. struct mpi3mr_fwevt *fwevt)
  1630. {
  1631. mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
  1632. fwevt->event_data_size);
  1633. }
  1634. /**
1635. * mpi3mr_update_sdev_qd - Update SCSI device queue depth
  1636. * @sdev: SCSI device reference
  1637. * @data: Queue depth reference
  1638. *
  1639. * This is an iterator function called for each SCSI device in a
  1640. * target to update the QD of each SCSI device.
  1641. *
  1642. * Return: Nothing.
  1643. */
  1644. static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
  1645. {
  1646. u16 *q_depth = (u16 *)data;
  1647. scsi_change_queue_depth(sdev, (int)*q_depth);
  1648. sdev->max_queue_depth = sdev->queue_depth;
  1649. }
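/*
 * Usage sketch (illustrative): the SCSI midlayer invokes this callback
 * once per device in a target, passing through the opaque data pointer,
 * e.g.:
 *
 *	u16 qd = tg->modified_qd;
 *	starget_for_each_device(starget, (void *)&qd, mpi3mr_update_sdev_qd);
 */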
  1650. /**
1651. * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
  1652. * @mrioc: Adapter instance reference
  1653. * @tg: Throttle group information pointer
  1654. *
1655. * Reduces the queue depth for each device associated with the
1656. * given throttle group.
1657. *
1658. * Return: Nothing.
  1659. */
  1660. static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
  1661. struct mpi3mr_throttle_group_info *tg)
  1662. {
  1663. unsigned long flags;
  1664. struct mpi3mr_tgt_dev *tgtdev;
  1665. struct mpi3mr_stgt_priv_data *tgt_priv;
  1666. spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
  1667. list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
  1668. if (tgtdev->starget && tgtdev->starget->hostdata) {
  1669. tgt_priv = tgtdev->starget->hostdata;
  1670. if (tgt_priv->throttle_group == tg) {
  1671. dprint_event_bh(mrioc,
  1672. "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
  1673. tgt_priv->perst_id, tgtdev->q_depth,
  1674. tg->modified_qd);
  1675. starget_for_each_device(tgtdev->starget,
  1676. (void *)&tg->modified_qd,
  1677. mpi3mr_update_sdev_qd);
  1678. }
  1679. }
  1680. }
  1681. spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
  1682. }
  1683. /**
  1684. * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
  1685. * @mrioc: Adapter instance reference
  1686. * @fwevt: Firmware event reference
  1687. *
1688. * Identifies the firmware event, calls the corresponding bottom
  1689. * half handler and sends event acknowledgment if required.
  1690. *
  1691. * Return: Nothing.
  1692. */
  1693. static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
  1694. struct mpi3mr_fwevt *fwevt)
  1695. {
  1696. struct mpi3_device_page0 *dev_pg0 = NULL;
  1697. u16 perst_id, handle, dev_info;
  1698. struct mpi3_device0_sas_sata_format *sasinf = NULL;
  1699. mpi3mr_fwevt_del_from_list(mrioc, fwevt);
  1700. mrioc->current_event = fwevt;
  1701. if (mrioc->stop_drv_processing)
  1702. goto out;
  1703. if (mrioc->unrecoverable) {
  1704. dprint_event_bh(mrioc,
  1705. "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
  1706. fwevt->event_id);
  1707. goto out;
  1708. }
  1709. if (!fwevt->process_evt)
  1710. goto evt_ack;
  1711. switch (fwevt->event_id) {
  1712. case MPI3_EVENT_DEVICE_ADDED:
  1713. {
  1714. dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
  1715. perst_id = le16_to_cpu(dev_pg0->persistent_id);
  1716. handle = le16_to_cpu(dev_pg0->dev_handle);
  1717. if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
  1718. mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
  1719. else if (mrioc->sas_transport_enabled &&
  1720. (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
  1721. sasinf = &dev_pg0->device_specific.sas_sata_format;
  1722. dev_info = le16_to_cpu(sasinf->device_info);
  1723. if (!mrioc->sas_hba.num_phys)
  1724. mpi3mr_sas_host_add(mrioc);
  1725. else
  1726. mpi3mr_sas_host_refresh(mrioc);
  1727. if (mpi3mr_is_expander_device(dev_info))
  1728. mpi3mr_expander_add(mrioc, handle);
  1729. }
  1730. break;
  1731. }
  1732. case MPI3_EVENT_DEVICE_INFO_CHANGED:
  1733. {
  1734. dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
  1735. perst_id = le16_to_cpu(dev_pg0->persistent_id);
  1736. if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
  1737. mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
  1738. break;
  1739. }
  1740. case MPI3_EVENT_DEVICE_STATUS_CHANGE:
  1741. {
  1742. mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
  1743. break;
  1744. }
  1745. case MPI3_EVENT_ENCL_DEVICE_ADDED:
  1746. case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
  1747. {
  1748. mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
  1749. break;
  1750. }
  1751. case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
  1752. {
  1753. mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
  1754. break;
  1755. }
  1756. case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
  1757. {
  1758. mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
  1759. break;
  1760. }
  1761. case MPI3_EVENT_LOG_DATA:
  1762. {
  1763. mpi3mr_logdata_evt_bh(mrioc, fwevt);
  1764. break;
  1765. }
  1766. case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
  1767. {
  1768. struct mpi3mr_throttle_group_info *tg;
  1769. tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
  1770. dprint_event_bh(mrioc,
  1771. "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
  1772. tg->id, tg->need_qd_reduction);
  1773. if (tg->need_qd_reduction) {
  1774. mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
  1775. tg->need_qd_reduction = 0;
  1776. }
  1777. break;
  1778. }
  1779. case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
  1780. {
  1781. while (mrioc->device_refresh_on)
  1782. msleep(500);
  1783. dprint_event_bh(mrioc,
  1784. "scan for non responding and newly added devices after soft reset started\n");
  1785. if (mrioc->sas_transport_enabled) {
  1786. mpi3mr_refresh_sas_ports(mrioc);
  1787. mpi3mr_refresh_expanders(mrioc);
  1788. }
  1789. mpi3mr_rfresh_tgtdevs(mrioc);
  1790. ioc_info(mrioc,
  1791. "scan for non responding and newly added devices after soft reset completed\n");
  1792. break;
  1793. }
  1794. default:
  1795. break;
  1796. }
  1797. evt_ack:
  1798. if (fwevt->send_ack)
  1799. mpi3mr_process_event_ack(mrioc, fwevt->event_id,
  1800. fwevt->evt_ctx);
  1801. out:
  1802. /* Put fwevt reference count to neutralize kref_init increment */
  1803. mpi3mr_fwevt_put(fwevt);
  1804. mrioc->current_event = NULL;
  1805. }
  1806. /**
  1807. * mpi3mr_fwevt_worker - Firmware event worker
  1808. * @work: Work struct containing firmware event
  1809. *
  1810. * Extracts the firmware event and calls mpi3mr_fwevt_bh.
  1811. *
  1812. * Return: Nothing.
  1813. */
  1814. static void mpi3mr_fwevt_worker(struct work_struct *work)
  1815. {
  1816. struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
  1817. work);
  1818. mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
  1819. /*
  1820. * Put fwevt reference count after
  1821. * dequeuing it from worker queue
  1822. */
  1823. mpi3mr_fwevt_put(fwevt);
  1824. }
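/*
 * Editorial note on fwevt reference counting: an event holds one
 * reference from kref_init() at allocation and a second taken when it
 * is queued to the worker; mpi3mr_fwevt_bh() drops the allocation
 * reference and the worker above drops the queueing reference, so the
 * event is freed only once the bottom half has finished with it.
 */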
  1825. /**
  1826. * mpi3mr_create_tgtdev - Create and add a target device
  1827. * @mrioc: Adapter instance reference
  1828. * @dev_pg0: Device Page 0 data
  1829. *
  1830. * If the device specified by the device page 0 data is not
  1831. * present in the driver's internal list, allocate the memory
  1832. * for the device, populate the data and add to the list, else
  1833. * update the device data. The key is persistent ID.
  1834. *
  1835. * Return: 0 on success, -ENOMEM on memory allocation failure
  1836. */
  1837. static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
  1838. struct mpi3_device_page0 *dev_pg0)
  1839. {
  1840. int retval = 0;
  1841. struct mpi3mr_tgt_dev *tgtdev = NULL;
  1842. u16 perst_id = 0;
  1843. perst_id = le16_to_cpu(dev_pg0->persistent_id);
  1844. if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
  1845. return retval;
  1846. tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
  1847. if (tgtdev) {
  1848. mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
  1849. mpi3mr_tgtdev_put(tgtdev);
  1850. } else {
  1851. tgtdev = mpi3mr_alloc_tgtdev();
  1852. if (!tgtdev)
  1853. return -ENOMEM;
  1854. mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
  1855. mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
  1856. }
  1857. return retval;
  1858. }
  1859. /**
  1860. * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
  1861. * @mrioc: Adapter instance reference
  1862. *
  1863. * Flush pending commands in the delayed lists due to a
  1864. * controller reset or driver removal as a cleanup.
  1865. *
  1866. * Return: Nothing
  1867. */
  1868. void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
  1869. {
  1870. struct delayed_dev_rmhs_node *_rmhs_node;
  1871. struct delayed_evt_ack_node *_evtack_node;
  1872. dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
  1873. while (!list_empty(&mrioc->delayed_rmhs_list)) {
  1874. _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
  1875. struct delayed_dev_rmhs_node, list);
  1876. list_del(&_rmhs_node->list);
  1877. kfree(_rmhs_node);
  1878. }
  1879. dprint_reset(mrioc, "flushing delayed event ack commands\n");
  1880. while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
  1881. _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
  1882. struct delayed_evt_ack_node, list);
  1883. list_del(&_evtack_node->list);
  1884. kfree(_evtack_node);
  1885. }
  1886. }
  1887. /**
  1888. * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
  1889. * @mrioc: Adapter instance reference
  1890. * @drv_cmd: Internal command tracker
  1891. *
  1892. * Issues a target reset TM to the firmware from the device
1893. * removal TM pend list or retries the removal handshake sequence
  1894. * based on the IOU control request IOC status.
  1895. *
  1896. * Return: Nothing
  1897. */
  1898. static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
  1899. struct mpi3mr_drv_cmd *drv_cmd)
  1900. {
  1901. u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
  1902. struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
  1903. if (drv_cmd->state & MPI3MR_CMD_RESET)
  1904. goto clear_drv_cmd;
  1905. ioc_info(mrioc,
  1906. "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
  1907. __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
  1908. drv_cmd->ioc_loginfo);
  1909. if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
  1910. if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
  1911. drv_cmd->retry_count++;
  1912. ioc_info(mrioc,
  1913. "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
  1914. __func__, drv_cmd->dev_handle,
  1915. drv_cmd->retry_count);
  1916. mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
  1917. drv_cmd, drv_cmd->iou_rc);
  1918. return;
  1919. }
  1920. ioc_err(mrioc,
  1921. "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
  1922. __func__, drv_cmd->dev_handle);
  1923. } else {
  1924. ioc_info(mrioc,
  1925. "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
  1926. __func__, drv_cmd->dev_handle);
  1927. clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
  1928. }
  1929. if (!list_empty(&mrioc->delayed_rmhs_list)) {
  1930. delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
  1931. struct delayed_dev_rmhs_node, list);
  1932. drv_cmd->dev_handle = delayed_dev_rmhs->handle;
  1933. drv_cmd->retry_count = 0;
  1934. drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
  1935. ioc_info(mrioc,
  1936. "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
  1937. __func__, drv_cmd->dev_handle);
  1938. mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
  1939. drv_cmd->iou_rc);
  1940. list_del(&delayed_dev_rmhs->list);
  1941. kfree(delayed_dev_rmhs);
  1942. return;
  1943. }
  1944. clear_drv_cmd:
  1945. drv_cmd->state = MPI3MR_CMD_NOTUSED;
  1946. drv_cmd->callback = NULL;
  1947. drv_cmd->retry_count = 0;
  1948. drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
  1949. clear_bit(cmd_idx, mrioc->devrem_bitmap);
  1950. }
  1951. /**
  1952. * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
  1953. * @mrioc: Adapter instance reference
  1954. * @drv_cmd: Internal command tracker
  1955. *
  1956. * Issues a target reset TM to the firmware from the device
1957. * removal TM pend list or issues an IO unit control request as
  1958. * part of device removal or hidden acknowledgment handshake.
  1959. *
  1960. * Return: Nothing
  1961. */
  1962. static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
  1963. struct mpi3mr_drv_cmd *drv_cmd)
  1964. {
  1965. struct mpi3_iounit_control_request iou_ctrl;
  1966. u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
  1967. struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
  1968. int retval;
  1969. if (drv_cmd->state & MPI3MR_CMD_RESET)
  1970. goto clear_drv_cmd;
  1971. if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
  1972. tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
  1973. if (tm_reply)
  1974. pr_info(IOCNAME
  1975. "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
  1976. mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
  1977. drv_cmd->ioc_loginfo,
  1978. le32_to_cpu(tm_reply->termination_count));
  1979. pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
  1980. mrioc->name, drv_cmd->dev_handle, cmd_idx);
  1981. memset(&iou_ctrl, 0, sizeof(iou_ctrl));
  1982. drv_cmd->state = MPI3MR_CMD_PENDING;
  1983. drv_cmd->is_waiting = 0;
  1984. drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
  1985. iou_ctrl.operation = drv_cmd->iou_rc;
  1986. iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
  1987. iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
  1988. iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
  1989. retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
  1990. 1);
  1991. if (retval) {
  1992. pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
  1993. mrioc->name);
  1994. goto clear_drv_cmd;
  1995. }
  1996. return;
  1997. clear_drv_cmd:
  1998. drv_cmd->state = MPI3MR_CMD_NOTUSED;
  1999. drv_cmd->callback = NULL;
  2000. drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
  2001. drv_cmd->retry_count = 0;
  2002. clear_bit(cmd_idx, mrioc->devrem_bitmap);
  2003. }
  2004. /**
  2005. * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
  2006. * @mrioc: Adapter instance reference
  2007. * @handle: Device handle
  2008. * @cmdparam: Internal command tracker
  2009. * @iou_rc: IO unit reason code
  2010. *
2011. * Issues a target reset TM to the firmware or adds it to a pend
  2012. * list as part of device removal or hidden acknowledgment
  2013. * handshake.
  2014. *
  2015. * Return: Nothing
  2016. */
  2017. static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
  2018. struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
  2019. {
  2020. struct mpi3_scsi_task_mgmt_request tm_req;
  2021. int retval = 0;
  2022. u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
  2023. u8 retrycount = 5;
  2024. struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
  2025. struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
  2026. if (drv_cmd)
  2027. goto issue_cmd;
  2028. do {
  2029. cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
  2030. MPI3MR_NUM_DEVRMCMD);
  2031. if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
  2032. if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
  2033. break;
  2034. cmd_idx = MPI3MR_NUM_DEVRMCMD;
  2035. }
  2036. } while (retrycount--);
  2037. if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
  2038. delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
  2039. GFP_ATOMIC);
  2040. if (!delayed_dev_rmhs)
  2041. return;
  2042. INIT_LIST_HEAD(&delayed_dev_rmhs->list);
  2043. delayed_dev_rmhs->handle = handle;
  2044. delayed_dev_rmhs->iou_rc = iou_rc;
  2045. list_add_tail(&delayed_dev_rmhs->list,
  2046. &mrioc->delayed_rmhs_list);
  2047. ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
  2048. __func__, handle);
  2049. return;
  2050. }
  2051. drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
  2052. issue_cmd:
  2053. cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
  2054. ioc_info(mrioc,
  2055. "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
  2056. __func__, handle, cmd_idx);
  2057. memset(&tm_req, 0, sizeof(tm_req));
  2058. if (drv_cmd->state & MPI3MR_CMD_PENDING) {
  2059. ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
  2060. goto out;
  2061. }
  2062. drv_cmd->state = MPI3MR_CMD_PENDING;
  2063. drv_cmd->is_waiting = 0;
  2064. drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
  2065. drv_cmd->dev_handle = handle;
  2066. drv_cmd->iou_rc = iou_rc;
  2067. tm_req.dev_handle = cpu_to_le16(handle);
  2068. tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  2069. tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
  2070. tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
  2071. tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
  2072. set_bit(handle, mrioc->removepend_bitmap);
  2073. retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
  2074. if (retval) {
  2075. ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
  2076. __func__);
  2077. goto out_failed;
  2078. }
  2079. out:
  2080. return;
  2081. out_failed:
  2082. drv_cmd->state = MPI3MR_CMD_NOTUSED;
  2083. drv_cmd->callback = NULL;
  2084. drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
  2085. drv_cmd->retry_count = 0;
  2086. clear_bit(cmd_idx, mrioc->devrem_bitmap);
  2087. }
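/*
 * Editorial summary of the removal handshake driven from here:
 *
 *	mpi3mr_dev_rmhs_send_tm()             target reset TM
 *	 -> mpi3mr_dev_rmhs_complete_tm()     IO unit control request
 *	  -> mpi3mr_dev_rmhs_complete_iou()   retry on failure or pick the
 *	                                      next handle from the delayed list
 *
 * Illustrative sketch of the lock-free slot allocation used above - a
 * bounded retry around find_first_zero_bit()/test_and_set_bit():
 *
 *	do {
 *		idx = find_first_zero_bit(bitmap, nbits);
 *		if (idx < nbits && !test_and_set_bit(idx, bitmap))
 *			break;
 *		idx = nbits;
 *	} while (retries--);
 */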
  2088. /**
  2089. * mpi3mr_complete_evt_ack - event ack request completion
  2090. * @mrioc: Adapter instance reference
  2091. * @drv_cmd: Internal command tracker
  2092. *
2093. * This is the completion handler for the non-blocking event
2094. * acknowledgment sent to the firmware, and it issues any
2095. * pending event acknowledgment request.
  2096. *
  2097. * Return: Nothing
  2098. */
  2099. static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
  2100. struct mpi3mr_drv_cmd *drv_cmd)
  2101. {
  2102. u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
  2103. struct delayed_evt_ack_node *delayed_evtack = NULL;
  2104. if (drv_cmd->state & MPI3MR_CMD_RESET)
  2105. goto clear_drv_cmd;
  2106. if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
  2107. dprint_event_th(mrioc,
  2108. "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
  2109. (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
  2110. drv_cmd->ioc_loginfo);
  2111. }
  2112. if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
  2113. delayed_evtack =
  2114. list_entry(mrioc->delayed_evtack_cmds_list.next,
  2115. struct delayed_evt_ack_node, list);
  2116. mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
  2117. delayed_evtack->event_ctx);
  2118. list_del(&delayed_evtack->list);
  2119. kfree(delayed_evtack);
  2120. return;
  2121. }
  2122. clear_drv_cmd:
  2123. drv_cmd->state = MPI3MR_CMD_NOTUSED;
  2124. drv_cmd->callback = NULL;
  2125. clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
  2126. }
  2127. /**
2128. * mpi3mr_send_event_ack - Issue event acknowledgment request
  2129. * @mrioc: Adapter instance reference
  2130. * @event: MPI3 event id
  2131. * @cmdparam: Internal command tracker
  2132. * @event_ctx: event context
  2133. *
2134. * Issues an event acknowledgment request to the firmware if there
2135. * is a free command to send the event ack, else adds it to a pend
2136. * list so that it will be processed on completion of a prior
2137. * event acknowledgment.
  2138. *
  2139. * Return: Nothing
  2140. */
  2141. static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
  2142. struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
  2143. {
  2144. struct mpi3_event_ack_request evtack_req;
  2145. int retval = 0;
  2146. u8 retrycount = 5;
  2147. u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
  2148. struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
  2149. struct delayed_evt_ack_node *delayed_evtack = NULL;
  2150. if (drv_cmd) {
  2151. dprint_event_th(mrioc,
  2152. "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
  2153. event, event_ctx);
  2154. goto issue_cmd;
  2155. }
  2156. dprint_event_th(mrioc,
  2157. "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
  2158. event, event_ctx);
  2159. do {
  2160. cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
  2161. MPI3MR_NUM_EVTACKCMD);
  2162. if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
  2163. if (!test_and_set_bit(cmd_idx,
  2164. mrioc->evtack_cmds_bitmap))
  2165. break;
  2166. cmd_idx = MPI3MR_NUM_EVTACKCMD;
  2167. }
  2168. } while (retrycount--);
  2169. if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
  2170. delayed_evtack = kzalloc(sizeof(*delayed_evtack),
  2171. GFP_ATOMIC);
  2172. if (!delayed_evtack)
  2173. return;
  2174. INIT_LIST_HEAD(&delayed_evtack->list);
  2175. delayed_evtack->event = event;
  2176. delayed_evtack->event_ctx = event_ctx;
  2177. list_add_tail(&delayed_evtack->list,
  2178. &mrioc->delayed_evtack_cmds_list);
  2179. dprint_event_th(mrioc,
  2180. "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
  2181. event, event_ctx);
  2182. return;
  2183. }
  2184. drv_cmd = &mrioc->evtack_cmds[cmd_idx];
  2185. issue_cmd:
  2186. cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
  2187. memset(&evtack_req, 0, sizeof(evtack_req));
  2188. if (drv_cmd->state & MPI3MR_CMD_PENDING) {
  2189. dprint_event_th(mrioc,
  2190. "sending event ack failed due to command in use\n");
  2191. goto out;
  2192. }
  2193. drv_cmd->state = MPI3MR_CMD_PENDING;
  2194. drv_cmd->is_waiting = 0;
  2195. drv_cmd->callback = mpi3mr_complete_evt_ack;
  2196. evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
  2197. evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
  2198. evtack_req.event = event;
  2199. evtack_req.event_context = cpu_to_le32(event_ctx);
  2200. retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
  2201. sizeof(evtack_req), 1);
  2202. if (retval) {
  2203. dprint_event_th(mrioc,
  2204. "posting event ack request is failed\n");
  2205. goto out_failed;
  2206. }
  2207. dprint_event_th(mrioc,
  2208. "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
  2209. event, event_ctx);
  2210. out:
  2211. return;
  2212. out_failed:
  2213. drv_cmd->state = MPI3MR_CMD_NOTUSED;
  2214. drv_cmd->callback = NULL;
  2215. clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
  2216. }
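/*
 * Editorial note: event acks follow the same pattern as the removal
 * handshake - if no evtack command slot is free, the ack is parked on
 * delayed_evtack_cmds_list and replayed from mpi3mr_complete_evt_ack()
 * when a prior ack completes.
 */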
  2217. /**
  2218. * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
  2219. * @mrioc: Adapter instance reference
  2220. * @event_reply: event data
  2221. *
2222. * Checks the reason code and based on that either blocks I/O
2223. * to the device, unblocks I/O to the device, or starts the device
2224. * removal handshake with the firmware with reason as remove for
2225. * PCIe devices.
  2226. *
  2227. * Return: Nothing
  2228. */
  2229. static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
  2230. struct mpi3_event_notification_reply *event_reply)
  2231. {
  2232. struct mpi3_event_data_pcie_topology_change_list *topo_evt =
  2233. (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
  2234. int i;
  2235. u16 handle;
  2236. u8 reason_code;
  2237. struct mpi3mr_tgt_dev *tgtdev = NULL;
  2238. struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
  2239. for (i = 0; i < topo_evt->num_entries; i++) {
  2240. handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
  2241. if (!handle)
  2242. continue;
  2243. reason_code = topo_evt->port_entry[i].port_status;
  2244. scsi_tgt_priv_data = NULL;
  2245. tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
  2246. if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
  2247. scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
  2248. tgtdev->starget->hostdata;
  2249. switch (reason_code) {
  2250. case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
  2251. if (scsi_tgt_priv_data) {
  2252. scsi_tgt_priv_data->dev_removed = 1;
  2253. scsi_tgt_priv_data->dev_removedelay = 0;
  2254. atomic_set(&scsi_tgt_priv_data->block_io, 0);
  2255. }
  2256. mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
  2257. MPI3_CTRL_OP_REMOVE_DEVICE);
  2258. break;
  2259. case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
  2260. if (scsi_tgt_priv_data) {
  2261. scsi_tgt_priv_data->dev_removedelay = 1;
  2262. atomic_inc(&scsi_tgt_priv_data->block_io);
  2263. }
  2264. break;
  2265. case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
  2266. if (scsi_tgt_priv_data &&
  2267. scsi_tgt_priv_data->dev_removedelay) {
  2268. scsi_tgt_priv_data->dev_removedelay = 0;
  2269. atomic_dec_if_positive
  2270. (&scsi_tgt_priv_data->block_io);
  2271. }
  2272. break;
  2273. case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
  2274. default:
  2275. break;
  2276. }
  2277. if (tgtdev)
  2278. mpi3mr_tgtdev_put(tgtdev);
  2279. }
  2280. }
  2281. /**
  2282. * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
  2283. * @mrioc: Adapter instance reference
  2284. * @event_reply: event data
  2285. *
2286. * Checks the reason code and based on that either blocks I/O
2287. * to the device, unblocks I/O to the device, or starts the device
2288. * removal handshake with the firmware with reason as remove for
2289. * SAS/SATA devices.
  2290. *
  2291. * Return: Nothing
  2292. */
  2293. static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
  2294. struct mpi3_event_notification_reply *event_reply)
  2295. {
  2296. struct mpi3_event_data_sas_topology_change_list *topo_evt =
  2297. (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
  2298. int i;
  2299. u16 handle;
  2300. u8 reason_code;
  2301. struct mpi3mr_tgt_dev *tgtdev = NULL;
  2302. struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
  2303. for (i = 0; i < topo_evt->num_entries; i++) {
  2304. handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
  2305. if (!handle)
  2306. continue;
  2307. reason_code = topo_evt->phy_entry[i].status &
  2308. MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
  2309. scsi_tgt_priv_data = NULL;
  2310. tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
  2311. if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
  2312. scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
  2313. tgtdev->starget->hostdata;
  2314. switch (reason_code) {
  2315. case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
  2316. if (scsi_tgt_priv_data) {
  2317. scsi_tgt_priv_data->dev_removed = 1;
  2318. scsi_tgt_priv_data->dev_removedelay = 0;
  2319. atomic_set(&scsi_tgt_priv_data->block_io, 0);
  2320. }
  2321. mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
  2322. MPI3_CTRL_OP_REMOVE_DEVICE);
  2323. break;
  2324. case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
  2325. if (scsi_tgt_priv_data) {
  2326. scsi_tgt_priv_data->dev_removedelay = 1;
  2327. atomic_inc(&scsi_tgt_priv_data->block_io);
  2328. }
  2329. break;
  2330. case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
  2331. if (scsi_tgt_priv_data &&
  2332. scsi_tgt_priv_data->dev_removedelay) {
  2333. scsi_tgt_priv_data->dev_removedelay = 0;
  2334. atomic_dec_if_positive
  2335. (&scsi_tgt_priv_data->block_io);
  2336. }
  2337. break;
  2338. case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
  2339. default:
  2340. break;
  2341. }
  2342. if (tgtdev)
  2343. mpi3mr_tgtdev_put(tgtdev);
  2344. }
  2345. }
  2346. /**
  2347. * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
  2348. * @mrioc: Adapter instance reference
  2349. * @event_reply: event data
  2350. *
2351. * Checks the reason code and based on that either blocks I/O
2352. * to the device, unblocks I/O to the device, or starts the device
2353. * removal handshake with the firmware with reason as remove or
2354. * hide acknowledgment.
  2355. *
  2356. * Return: Nothing
  2357. */
  2358. static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
  2359. struct mpi3_event_notification_reply *event_reply)
  2360. {
  2361. u16 dev_handle = 0;
  2362. u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
  2363. struct mpi3mr_tgt_dev *tgtdev = NULL;
  2364. struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
  2365. struct mpi3_event_data_device_status_change *evtdata =
  2366. (struct mpi3_event_data_device_status_change *)event_reply->event_data;
  2367. if (mrioc->stop_drv_processing)
  2368. goto out;
  2369. dev_handle = le16_to_cpu(evtdata->dev_handle);
  2370. switch (evtdata->reason_code) {
  2371. case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
  2372. case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
  2373. block = 1;
  2374. break;
  2375. case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
  2376. delete = 1;
  2377. hide = 1;
  2378. break;
  2379. case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
  2380. delete = 1;
  2381. remove = 1;
  2382. break;
  2383. case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
  2384. case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
  2385. ublock = 1;
  2386. break;
  2387. default:
  2388. break;
  2389. }
  2390. tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
  2391. if (!tgtdev)
  2392. goto out;
  2393. if (hide)
  2394. tgtdev->is_hidden = hide;
  2395. if (tgtdev->starget && tgtdev->starget->hostdata) {
  2396. scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
  2397. tgtdev->starget->hostdata;
  2398. if (block)
  2399. atomic_inc(&scsi_tgt_priv_data->block_io);
  2400. if (delete)
  2401. scsi_tgt_priv_data->dev_removed = 1;
  2402. if (ublock)
  2403. atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
  2404. }
  2405. if (remove)
  2406. mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
  2407. MPI3_CTRL_OP_REMOVE_DEVICE);
  2408. if (hide)
  2409. mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
  2410. MPI3_CTRL_OP_HIDDEN_ACK);
  2411. out:
  2412. if (tgtdev)
  2413. mpi3mr_tgtdev_put(tgtdev);
  2414. }
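/*
 * Editorial summary of the reason-code handling above:
 *
 *	reason code              block ublock hide delete remove
 *	INT_DEVICE_RESET_STRT      x
 *	INT_IT_NEXUS_RESET_STRT    x
 *	HIDDEN                                 x     x
 *	VD_NOT_RESPONDING                            x      x
 *	INT_DEVICE_RESET_CMP             x
 *	INT_IT_NEXUS_RESET_CMP           x
 */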
  2415. /**
  2416. * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
  2417. * @mrioc: Adapter instance reference
  2418. * @event_reply: event data
  2419. *
  2420. * Blocks and unblocks host level I/O based on the reason code
  2421. *
  2422. * Return: Nothing
  2423. */
  2424. static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
  2425. struct mpi3_event_notification_reply *event_reply)
  2426. {
  2427. struct mpi3_event_data_prepare_for_reset *evtdata =
  2428. (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
  2429. if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
  2430. dprint_event_th(mrioc,
  2431. "prepare for reset event top half with rc=start\n");
  2432. if (mrioc->prepare_for_reset)
  2433. return;
  2434. mrioc->prepare_for_reset = 1;
  2435. mrioc->prepare_for_reset_timeout_counter = 0;
  2436. } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
  2437. dprint_event_th(mrioc,
  2438. "prepare for reset top half with rc=abort\n");
  2439. mrioc->prepare_for_reset = 0;
  2440. mrioc->prepare_for_reset_timeout_counter = 0;
  2441. }
  2442. if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
  2443. == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
  2444. mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
  2445. le32_to_cpu(event_reply->event_context));
  2446. }
  2447. /**
  2448. * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
  2449. * @mrioc: Adapter instance reference
  2450. * @event_reply: event data
  2451. *
2452. * Identifies the new shutdown timeout value and updates it.
  2453. *
  2454. * Return: Nothing
  2455. */
  2456. static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
  2457. struct mpi3_event_notification_reply *event_reply)
  2458. {
  2459. struct mpi3_event_data_energy_pack_change *evtdata =
  2460. (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
  2461. u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2462. if (!shutdown_timeout) {
  2463. ioc_warn(mrioc,
  2464. "%s :Invalid Shutdown Timeout received = %d\n",
  2465. __func__, shutdown_timeout);
  2466. return;
  2467. }
  2468. ioc_info(mrioc,
  2469. "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
  2470. __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
  2471. mrioc->facts.shutdown_timeout = shutdown_timeout;
  2472. }
  2473. /**
  2474. * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
  2475. * @mrioc: Adapter instance reference
  2476. * @event_reply: event data
  2477. *
2478. * Displays cable management event details.
  2479. *
  2480. * Return: Nothing
  2481. */
  2482. static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
  2483. struct mpi3_event_notification_reply *event_reply)
  2484. {
  2485. struct mpi3_event_data_cable_management *evtdata =
  2486. (struct mpi3_event_data_cable_management *)event_reply->event_data;
  2487. switch (evtdata->status) {
  2488. case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
  2489. {
  2490. ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
  2491. "Devices connected to this cable are not detected.\n"
  2492. "This cable requires %d mW of power.\n",
  2493. evtdata->receptacle_id,
  2494. le32_to_cpu(evtdata->active_cable_power_requirement));
  2495. break;
  2496. }
  2497. case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
  2498. {
  2499. ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
  2500. evtdata->receptacle_id);
  2501. break;
  2502. }
  2503. default:
  2504. break;
  2505. }
  2506. }
/**
 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
 * @mrioc: Adapter instance reference
 *
 * Add driver specific event to make sure that the driver won't process the
 * events until all the devices are refreshed during soft reset.
 *
 * Return: Nothing
 */
void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	fwevt = mpi3mr_alloc_fwevt(0);
	if (!fwevt) {
		dprint_event_th(mrioc,
		    "failed to schedule bottom half handler for event(0x%02x)\n",
		    MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
		return;
	}
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = 0;
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_os_handle_events - Firmware event handler
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies whether the event has to be handled and
 * acknowledged, and either processes the event in the tophalf
 * and/or schedules a bottom half through mpi3mr_fwevt_worker.
 *
 * Return: Nothing
 */
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 evt_type, sz;
	struct mpi3mr_fwevt *fwevt = NULL;
	bool ack_req = 0, process_evt_bh = 0;

	if (mrioc->stop_drv_processing)
		return;

	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->event;

	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *dev_pg0 =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
			ioc_err(mrioc,
			    "%s :Failed to add device in the device add event\n",
			    __func__);
		else
			process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		mpi3mr_preparereset_evt_th(mrioc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
		    __func__, evt_type);
		break;
	}
	if (process_evt_bh || ack_req) {
		sz = event_reply->event_data_length * 4;
		fwevt = mpi3mr_alloc_fwevt(sz);
		if (!fwevt) {
			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
			    __func__, __FILE__, __LINE__, __func__);
			return;
		}
		memcpy(fwevt->event_data, event_reply->event_data, sz);
		fwevt->mrioc = mrioc;
		fwevt->event_id = evt_type;
		fwevt->send_ack = ack_req;
		fwevt->process_evt = process_evt_bh;
		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
	}
}

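/*
 * Illustrative sketch (not driver code): the firmware reports
 * event_data_length in 32-bit dwords, so the handler above converts it
 * to bytes with "sz = event_data_length * 4" before copying the payload
 * into the bottom-half tracker. A minimal standalone model of that
 * allocate-and-copy step, with hypothetical types standing in for the
 * MPI3 structures:
 */
#if 0	/* example only, not part of the driver build */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct example_fwevt {
	uint16_t event_id;
	uint16_t event_data_size;	/* payload size in bytes */
	uint8_t event_data[];		/* flexible payload */
};

static struct example_fwevt *example_alloc_fwevt(uint16_t dwords,
	uint8_t event_id, const void *payload)
{
	uint16_t sz = dwords * 4;	/* dwords -> bytes */
	struct example_fwevt *fwevt = malloc(sizeof(*fwevt) + sz);

	if (!fwevt)
		return NULL;
	fwevt->event_id = event_id;
	fwevt->event_data_size = sz;
	memcpy(fwevt->event_data, payload, sz);
	return fwevt;	/* would then be queued to a worker */
}
#endif
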
/**
 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * Identifies the protection information flags from the SCSI
 * command and sets the appropriate flags in the MPI3 SCSI IO
 * request.
 *
 * Return: Nothing
 */
static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	u16 eedp_flags = 0;
	unsigned char prot_op = scsi_get_prot_op(scmd);

	switch (prot_op) {
	case SCSI_PROT_NORMAL:
		return;
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_READ_PASS:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_PASS:
		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
			    0xffff;
		} else
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;

		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	default:
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
		    MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
		scsiio_req->cdb.eedp32.primary_reference_tag =
		    cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;

	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;

	switch (scsi_prot_interval(scmd)) {
	case 512:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
		break;
	case 520:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
		break;
	case 4080:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
		break;
	case 4088:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
		break;
	case 4096:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
		break;
	case 4104:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
		break;
	case 4160:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
		break;
	default:
		break;
	}

	scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
	scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
}

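/*
 * Illustrative sketch (not driver code): mpi3mr_setup_eedp() above first
 * maps the SCSI layer's protection operation to an EEDP opcode and then
 * ORs in the individual check flags. A standalone model of that decision
 * table; the enum and flag names below are hypothetical stand-ins for
 * the MPI3 definitions:
 */
#if 0	/* example only, not part of the driver build */
#include <stdint.h>
#include <stdbool.h>

enum example_prot_op { PROT_NORMAL, PROT_READ_STRIP, PROT_WRITE_INSERT,
	PROT_READ_PASS, PROT_WRITE_PASS };

#define EX_OP_CHECK_REMOVE	0x0003	/* verify PI, then strip it */
#define EX_OP_INSERT		0x0004	/* controller generates PI */
#define EX_OP_CHECK		0x0001	/* verify PI, pass through */
#define EX_CHK_GUARD		0x0100	/* verify the guard (CRC) field */
#define EX_CHK_REF_TAG		0x0400	/* verify the reference tag */

static uint16_t example_eedp_flags(enum example_prot_op op,
	bool chk_guard, bool chk_ref)
{
	uint16_t flags;

	switch (op) {
	case PROT_READ_STRIP:
		flags = EX_OP_CHECK_REMOVE;
		break;
	case PROT_WRITE_INSERT:
		flags = EX_OP_INSERT;
		break;
	case PROT_READ_PASS:
	case PROT_WRITE_PASS:
		flags = EX_OP_CHECK;
		break;
	default:
		return 0;	/* no protection requested */
	}
	if (chk_guard)
		flags |= EX_CHK_GUARD;
	if (chk_ref)
		flags |= EX_CHK_REF_TAG;
	return flags;
}
#endif
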
/**
 * mpi3mr_build_sense_buffer - Map sense information
 * @desc: Sense type
 * @buf: Sense buffer to populate
 * @key: Sense key
 * @asc: Additional sense code
 * @ascq: Additional sense code qualifier
 *
 * Maps the given sense information into either descriptor or
 * fixed format sense data.
 *
 * Return: Nothing
 */
static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
	u8 asc, u8 ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor, current */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
		buf[7] = 0;
	} else {
		buf[0] = 0x70;	/* fixed, current */
		buf[2] = key;
		buf[7] = 0xa;
		buf[12] = asc;
		buf[13] = ascq;
	}
}

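/*
 * Illustrative sketch (not driver code): the helper above emits either
 * descriptor-format (response code 0x72) or fixed-format (0x70) sense
 * data; the byte offsets follow the SPC sense data layouts. A standalone
 * copy with a small usage example building the fixed-format sense that
 * mpi3mr_map_eedp_error() below produces for a guard error:
 */
#if 0	/* example only, not part of the driver build */
#include <stdint.h>
#include <string.h>

static void example_build_sense(int desc, uint8_t *buf, uint8_t key,
	uint8_t asc, uint8_t ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor format, current error */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
		buf[7] = 0;	/* no additional descriptors */
	} else {
		buf[0] = 0x70;	/* fixed format, current error */
		buf[2] = key;
		buf[7] = 0xa;	/* additional sense length: 10 bytes */
		buf[12] = asc;
		buf[13] = ascq;
	}
}

static void example_usage(void)
{
	uint8_t sense[18];

	memset(sense, 0, sizeof(sense));
	/* ILLEGAL REQUEST (0x05), ASC 0x10/ASCQ 0x01: guard check failed */
	example_build_sense(0, sense, 0x05, 0x10, 0x01);
}
#endif
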
/**
 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
 * @scmd: SCSI command reference
 * @ioc_status: status of MPI3 request
 *
 * Maps the EEDP error status of the SCSI IO request to sense
 * data.
 *
 * Return: Nothing
 */
static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
	u16 ioc_status)
{
	u8 ascq = 0;

	switch (ioc_status) {
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
		ascq = 0x01;
		break;
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		ascq = 0x02;
		break;
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
		ascq = 0x03;
		break;
	default:
		ascq = 0x00;
		break;
	}

	mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
	    0x10, ascq);
	scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
}

/**
 * mpi3mr_process_op_reply_desc - reply descriptor handler
 * @mrioc: Adapter instance reference
 * @reply_desc: Operational reply descriptor
 * @reply_dma: placeholder for reply DMA address
 * @qidx: Operational queue index
 *
 * Processes the operational reply descriptor and identifies the
 * descriptor type. Based on the descriptor, maps the MPI3
 * request status to a SCSI command status and calls the
 * scsi_done callback.
 *
 * Return: Nothing
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc = NULL;
	struct mpi3_address_reply_descriptor *addr_desc = NULL;
	struct mpi3_success_reply_descriptor *success_desc = NULL;
	struct mpi3_scsi_io_reply *scsi_reply = NULL;
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u8 *sense_buf = NULL;
	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
	u16 dev_handle = 0xFFFF;
	struct scsi_sense_hdr sshdr;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;
	u8 throttle_enabled_dev = 0;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
		    *reply_dma);
		if (!scsi_reply) {
			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
			    mrioc->name);
			goto out;
		}
		host_tag = le16_to_cpu(scsi_reply->host_tag);
		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
		scsi_status = scsi_reply->scsi_status;
		scsi_state = scsi_reply->scsi_state;
		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
		sense_count = le32_to_cpu(scsi_reply->sense_count);
		resp_data = le32_to_cpu(scsi_reply->response_data);
		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			panic("%s: Ran out of sense buffers\n", mrioc->name);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}
	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
	if (!scmd) {
		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
		    mrioc->name, host_tag);
		goto out;
	}
	priv = scsi_cmd_priv(scmd);

	data_len_blks = scsi_bufflen(scmd) >> 9;
	sdev_priv_data = scmd->device->hostdata;
	if (sdev_priv_data) {
		stgt_priv_data = sdev_priv_data->tgt_priv_data;
		if (stgt_priv_data) {
			tg = stgt_priv_data->throttle_group;
			throttle_enabled_dev =
			    stgt_priv_data->io_throttle_enabled;
		}
	}
	if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
	    throttle_enabled_dev)) {
		ioc_pend_data_len = atomic_sub_return(data_len_blks,
		    &mrioc->pend_large_data_sz);
		if (tg) {
			tg_pend_data_len = atomic_sub_return(data_len_blks,
			    &tg->pend_large_data_sz);
			if (tg->io_divert && ((ioc_pend_data_len <=
			    mrioc->io_throttle_low) &&
			    (tg_pend_data_len <= tg->low))) {
				tg->io_divert = 0;
				mpi3mr_set_io_divert_for_all_vd_in_tg(
				    mrioc, tg, 0);
			}
		} else {
			if (ioc_pend_data_len <= mrioc->io_throttle_low)
				stgt_priv_data->io_divert = 0;
		}
	} else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
		ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
		if (!tg) {
			if (ioc_pend_data_len <= mrioc->io_throttle_low)
				stgt_priv_data->io_divert = 0;
		} else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
			tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
			if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
				tg->io_divert = 0;
				mpi3mr_set_io_divert_for_all_vd_in_tg(
				    mrioc, tg, 0);
			}
		}
	}

	if (success_desc) {
		scmd->result = DID_OK << 16;
		goto out_success;
	}

	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
	    sense_buf) {
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);

		memcpy(scmd->sense_buffer, sense_buf, sz);
	}

	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
			break;
		if (xfer_count < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		mpi3mr_map_eedp_error(scmd, ioc_status);
		break;
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INVALID_SGL:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
	    (scmd->cmnd[0] != ATA_16) &&
	    mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
		    scmd->result);
		scsi_print_command(scmd);
		ioc_info(mrioc,
		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
		    __func__, dev_handle, ioc_status, ioc_loginfo,
		    priv->req_q_idx + 1);
		ioc_info(mrioc,
		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
		if (sense_buf) {
			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
			ioc_info(mrioc,
			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
			    __func__, sense_count, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
	}
out_success:
	if (priv->meta_sg_valid) {
		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
	}
	mpi3mr_clear_scmd_priv(mrioc, scmd);
	scsi_dma_unmap(scmd);
	scsi_done(scmd);
out:
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

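/*
 * Illustrative sketch (not driver code): the completion path above keeps
 * a running sum of outstanding "large" I/O (in 512-byte blocks) and
 * clears the divert flag only once the pending total drops to a low
 * watermark. A standalone model of that high/low watermark scheme using
 * C11 atomics; all names here are hypothetical:
 */
#if 0	/* example only, not part of the driver build */
#include <stdatomic.h>
#include <stdbool.h>

struct example_throttle {
	atomic_uint pend_blks;	/* outstanding large-I/O blocks */
	unsigned int high;	/* start diverting above this */
	unsigned int low;	/* stop diverting at or below this */
	bool io_divert;
};

/* submission side: account the I/O and decide whether to divert */
static void example_submit(struct example_throttle *t, unsigned int blks)
{
	unsigned int pend = atomic_fetch_add(&t->pend_blks, blks) + blks;

	if (pend >= t->high)
		t->io_divert = true;
}

/* completion side: un-account and clear divert at the low watermark */
static void example_complete(struct example_throttle *t, unsigned int blks)
{
	unsigned int pend = atomic_fetch_sub(&t->pend_blks, blks) - blks;

	if (t->io_divert && pend <= t->low)
		t->io_divert = false;
}
#endif
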
/**
 * mpi3mr_get_chain_idx - get free chain buffer index
 * @mrioc: Adapter instance reference
 *
 * Try to get a free chain buffer index from the free pool.
 *
 * Return: -1 on failure or the free chain buffer index
 */
static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
{
	u8 retry_count = 5;
	int cmd_idx = -1;

	do {
		spin_lock(&mrioc->chain_buf_lock);
		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
		    mrioc->chain_buf_count);
		if (cmd_idx < mrioc->chain_buf_count) {
			set_bit(cmd_idx, mrioc->chain_bitmap);
			spin_unlock(&mrioc->chain_buf_lock);
			break;
		}
		spin_unlock(&mrioc->chain_buf_lock);
		cmd_idx = -1;
	} while (retry_count--);
	return cmd_idx;
}

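/*
 * Illustrative sketch (not driver code): mpi3mr_get_chain_idx() above is
 * a first-fit bitmap allocator with a bounded retry, built on the kernel
 * bitmap API under chain_buf_lock. A standalone single-word version of
 * the same idea (locking omitted for brevity; a real multi-threaded user
 * would need it):
 */
#if 0	/* example only, not part of the driver build */
#include <stdint.h>

#define EX_CHAIN_COUNT 64

static uint64_t ex_chain_bitmap;	/* bit set => index in use */

static int example_get_chain_idx(void)
{
	int retry = 5;

	do {
		/* find_first_zero_bit() equivalent for one 64-bit word */
		for (int i = 0; i < EX_CHAIN_COUNT; i++) {
			if (!(ex_chain_bitmap & (1ULL << i))) {
				ex_chain_bitmap |= 1ULL << i;
				return i;	/* claimed this slot */
			}
		}
	} while (retry--);
	return -1;	/* pool exhausted */
}
#endif
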
/**
 * mpi3mr_prepare_sg_scmd - build scatter gather list
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function maps SCSI command's data and protection SGEs to
 * MPI request SGEs. If required, an additional 4K chain buffer
 * is used to send the SGEs.
 *
 * Return: 0 on success, -ENOMEM on dma_map_sg failure
 */
static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_length;
	int sges_left, chain_idx;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 last_chain_sgl_flags;
	struct chain_element *chain_req;
	struct scmd_priv *priv = NULL;
	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;

	priv = scsi_cmd_priv(scmd);

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	if (meta_sg)
		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
	else
		sg_local = &scsiio_req->sgl;

	if (!scsiio_req->data_length && !meta_sg) {
		mpi3mr_build_zero_len_sge(sg_local);
		return 0;
	}

	if (meta_sg) {
		sg_scmd = scsi_prot_sglist(scmd);
		sges_left = dma_map_sg(&mrioc->pdev->dev,
		    scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd),
		    scmd->sc_data_direction);
		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
	} else {
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map returned unsupported sge count %d!\n",
		    sges_left);
		return -ENOMEM;
	}

	sges_in_segment = (mrioc->facts.op_req_sz -
	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);

	if (scsiio_req->sgl[0].eedp.flags ==
	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
		sg_local += sizeof(struct mpi3_sge_common);
		sges_in_segment--;
		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
	}

	if (scsiio_req->msg_flags ==
	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
		sges_in_segment--;
		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
	}

	if (meta_sg)
		sges_in_segment = 1;

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
		sges_in_segment--;
	}

	chain_idx = mpi3mr_get_chain_idx(mrioc);
	if (chain_idx < 0)
		return -1;
	chain_req = &mrioc->chain_sgl_list[chain_idx];
	if (meta_sg)
		priv->meta_chain_idx = chain_idx;
	else
		priv->chain_idx = chain_idx;

	chain = chain_req->addr;
	chain_dma = chain_req->dma_addr;
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);

	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
	}

	return 0;
}

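/*
 * Illustrative sketch (not driver code): the chain decision above boils
 * down to simple arithmetic - how many SGEs fit in the request frame
 * after reserving slots for the EEDP and meta SGLs, and whether the
 * mapped scatterlist exceeds that. A standalone model of just that math,
 * with hypothetical sizes:
 */
#if 0	/* example only, not part of the driver build */
#include <stdbool.h>

#define EX_OP_REQ_SZ	512	/* request frame size, bytes */
#define EX_SGL_OFFSET	256	/* offset of the SGL within the frame */
#define EX_SGE_SZ	16	/* size of one simple SGE */

static bool example_needs_chain(int sges_left, bool has_eedp, bool has_meta)
{
	int sges_in_segment = (EX_OP_REQ_SZ - EX_SGL_OFFSET) / EX_SGE_SZ;

	if (has_eedp)
		sges_in_segment--;	/* sgl[0] reserved for EEDP */
	if (has_meta)
		sges_in_segment--;	/* last slot reserved for meta SGL */
	/* one slot must also hold the chain SGE itself when chaining */
	return sges_left > sges_in_segment;
}
#endif
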
/**
 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function calls mpi3mr_prepare_sg_scmd for constructing
 * both data SGEs and protection information SGEs in the MPI
 * format from the SCSI command as appropriate.
 *
 * Return: return value of mpi3mr_prepare_sg_scmd.
 */
static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	int ret;

	ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
	if (ret)
		return ret;

	if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
		/* There is a valid meta sg */
		scsiio_req->flags |=
		    cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
		ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
	}

	return ret;
}

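/*
 * Illustrative sketch (not driver code): the builder above runs the SGL
 * preparation twice when protection information is present - once for
 * the data scatterlist and once more, with the HOST_PI flag set, for the
 * protection scatterlist. A minimal standalone model of that control
 * flow; every name below is hypothetical:
 */
#if 0	/* example only, not part of the driver build */
#include <stdbool.h>

#define EX_FLAG_HOST_PI (1u << 16)

struct example_req {
	unsigned int flags;
	bool meta_sgl_valid;
};

/* stub standing in for the per-pass SGL builder */
static int example_prepare_sg(struct example_req *req)
{
	(void)req;
	return 0;
}

static int example_build_sg(struct example_req *req)
{
	int ret = example_prepare_sg(req);	/* first pass: data SGEs */

	if (ret)
		return ret;
	if (req->meta_sgl_valid) {
		req->flags |= EX_FLAG_HOST_PI;	/* second pass: meta SGEs */
		ret = example_prepare_sg(req);
	}
	return ret;
}
#endif
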
/**
 * mpi3mr_tm_response_name - get TM response as a string
 * @resp_code: TM response code
 *
 * Converts a known task management response code to a readable
 * string.
 *
 * Return: response code string.
 */
static const char *mpi3mr_tm_response_name(u8 resp_code)
{
	const char *desc;

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
		desc = "invalid LUN";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
		desc = "overlapped tag attempted";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
		desc = "task management request denied by NVMe device";
		break;
	default:
		desc = "unknown";
		break;
	}
	return desc;
}

/**
 * mpi3mr_poll_pend_io_completions - Process pending I/O completions
 * @mrioc: Adapter instance reference
 *
 * Polls each operational reply queue and processes any
 * outstanding reply descriptors on it.
 *
 * Return: Nothing
 */
inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
{
	int i;
	int num_of_reply_queues =
	    mrioc->num_op_reply_q + mrioc->op_reply_q_offset;

	for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
		mpi3mr_process_op_reply_q(mrioc,
		    mrioc->intr_info[i].op_reply_q);
}

/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @mrioc: Adapter instance reference
 * @tm_type: Task Management type
 * @handle: Device handle
 * @lun: lun ID
 * @htag: Host tag of the TM request
 * @timeout: TM timeout value
 * @drv_cmd: Internal command tracker
 * @resp_code: Response code placeholder
 * @scmd: SCSI command
 *
 * Issues a Task Management request to the controller for the
 * specified target, lun and command, waits for its completion,
 * and checks the TM response. Recovers from a TM timeout by
 * issuing a controller reset.
 *
 * Return: 0 on success, non-zero on errors
 */
int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
	u16 handle, uint lun, u16 htag, ulong timeout,
	struct mpi3mr_drv_cmd *drv_cmd,
	u8 *resp_code, struct scsi_cmnd *scmd)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct scmd_priv *cmd_priv = NULL;
	struct scsi_device *sdev = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;

	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
	    __func__, tm_type, handle);
	if (mrioc->unrecoverable) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
		    __func__);
		goto out;
	}

	memset(&tm_req, 0, sizeof(tm_req));
	mutex_lock(&drv_cmd->mutex);
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = tm_type;
	tm_req.host_tag = cpu_to_le16(htag);

	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);

	if (scmd) {
		sdev = scmd->device;
		sdev_priv_data = sdev->hostdata;
		scsi_tgt_priv_data = ((sdev_priv_data) ?
		    sdev_priv_data->tgt_priv_data : NULL);
	} else {
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
	}

	if (scsi_tgt_priv_data)
		atomic_inc(&scsi_tgt_priv_data->block_io);

	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	init_completion(&drv_cmd->done);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			dprint_tm(mrioc,
			    "task management request timed out after %ld seconds\n",
			    timeout);
			if (mrioc->logging_level & MPI3_DEBUG_TM)
				dprint_dump_req(&tm_req, sizeof(tm_req)/4);
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
		dprint_tm(mrioc, "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}

	tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		*resp_code = le32_to_cpu(tm_reply->response_data) &
		    MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		*resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		dprint_tm(mrioc,
		    "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (*resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	dprint_tm(mrioc,
	    "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
	    tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
	    le32_to_cpu(tm_reply->termination_count),
	    mpi3mr_tm_response_name(*resp_code), *resp_code);

	if (!retval) {
		mpi3mr_ioc_disable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_ioc_enable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_process_admin_reply_q(mrioc);
	}

	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (!scsi_tgt_priv_data)
			break;
		scsi_tgt_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_tgt_pending,
		    (void *)scsi_tgt_priv_data->starget);
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		if (!sdev_priv_data)
			break;
		sdev_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_dev_pending, (void *)sdev);
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&drv_cmd->mutex);
	if (scsi_tgt_priv_data)
		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
out:
	return retval;
}

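/*
 * Illustrative sketch (not driver code): mpi3mr_issue_tm() follows a
 * common fire-and-wait pattern - post a request, block on a completion
 * with a timeout, and escalate to a reset when the completion never
 * fires. A condensed standalone model using POSIX semaphores in place of
 * the kernel completion API; all names here are hypothetical:
 */
#if 0	/* example only, not part of the driver build */
#include <semaphore.h>
#include <time.h>
#include <errno.h>

static int example_fire_and_wait(sem_t *done, unsigned int timeout_s,
	int (*post)(void), void (*recover)(void))
{
	struct timespec ts;

	if (post())
		return -1;		/* request never made it out */
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_s;
	while (sem_timedwait(done, &ts) == -1) {
		if (errno == EINTR)
			continue;	/* interrupted: retry the wait */
		recover();		/* timed out: escalate (reset) */
		return -1;
	}
	return 0;			/* reply arrived within timeout */
}
#endif
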
/**
 * mpi3mr_bios_param - BIOS param callback
 * @sdev: SCSI device reference
 * @bdev: Block device reference
 * @capacity: Capacity in logical sectors
 * @params: Parameter array
 *
 * Just the parameters with heads/sectors/cylinders.
 *
 * Return: 0 always
 */
static int mpi3mr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int params[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	ulong dummy;

	heads = 64;
	sectors = 32;

	dummy = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	if ((ulong)capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		dummy = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, dummy);
	}

	params[0] = heads;
	params[1] = sectors;
	params[2] = cylinders;
	return 0;
}

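/*
 * Illustrative sketch (not driver code): the geometry reported above is
 * pure arithmetic - 64 heads / 32 sectors per track for small disks, and
 * 255/63 once capacity reaches 0x200000 512-byte sectors (1 GiB). A
 * standalone version with a worked example:
 */
#if 0	/* example only, not part of the driver build */
#include <stdint.h>

static void example_chs(uint64_t capacity, int *heads, int *sectors,
	uint64_t *cylinders)
{
	*heads = 64;
	*sectors = 32;
	if (capacity >= 0x200000) {	/* >= 1 GiB of 512-byte sectors */
		*heads = 255;
		*sectors = 63;
	}
	*cylinders = capacity / ((uint64_t)*heads * *sectors);
}

/*
 * e.g. a 2 TiB disk: capacity = 4294967296 sectors, so
 * cylinders = 4294967296 / (255 * 63) = 267349.
 */
#endif
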
/**
 * mpi3mr_map_queues - Map queues callback handler
 * @shost: SCSI host reference
 *
 * Maps default and poll queues.
 *
 * Return: Nothing
 */
static void mpi3mr_map_queues(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int i, qoff, offset;
	struct blk_mq_queue_map *map = NULL;

	offset = mrioc->op_reply_q_offset;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = mrioc->default_qcount;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = mrioc->active_poll_qcount;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}

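/*
 * Illustrative sketch (not driver code): the mapping above walks the
 * hctx types in order, giving each type a contiguous slice of hardware
 * queues that starts where the previous slice ended. A standalone model
 * of just that offset bookkeeping, with hypothetical types and counts:
 */
#if 0	/* example only, not part of the driver build */
struct example_map {
	int nr_queues;
	int queue_offset;
};

static void example_map_queues(struct example_map *maps, int ntypes,
	int default_qcount, int poll_qcount)
{
	int qoff = 0;

	for (int i = 0; i < ntypes; i++) {
		/* type 0 gets the default queues, the last type the poll queues */
		maps[i].nr_queues = (i == 0) ? default_qcount :
		    (i == ntypes - 1) ? poll_qcount : 0;
		if (!maps[i].nr_queues)
			continue;
		maps[i].queue_offset = qoff;	/* contiguous slices */
		qoff += maps[i].nr_queues;
	}
}
#endif
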
/**
 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
 * @mrioc: Adapter instance reference
 *
 * Calculates the number of pending I/Os for the controller and
 * returns it.
 *
 * Return: Number of pending I/Os
 */
static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	uint pend_ios = 0;

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
	return pend_ios;
}

/**
 * mpi3mr_print_pending_host_io - print pending I/Os
 * @mrioc: Adapter instance reference
 *
 * Prints the number of pending I/Os and the details of each I/O
 * prior to reset, for debugging purposes.
 *
 * Return: Nothing
 */
static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_print_scmd, (void *)mrioc);
}

/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @mrioc: Adapter instance reference
 * @timeout: timeout in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * to hit the timeout.
 *
 * Return: Nothing
 */
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
{
	enum mpi3mr_iocstate iocstate;
	int i = 0;

	iocstate = mpi3mr_get_iocstate(mrioc);
	if (iocstate != MRIOC_STATE_READY)
		return;

	if (!mpi3mr_get_fw_pending_ios(mrioc))
		return;
	ioc_info(mrioc,
	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));

	for (i = 0; i < timeout; i++) {
		if (!mpi3mr_get_fw_pending_ios(mrioc))
			break;
		iocstate = mpi3mr_get_iocstate(mrioc);
		if (iocstate != MRIOC_STATE_READY)
			break;
		msleep(1000);
	}

	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
	    mpi3mr_get_fw_pending_ios(mrioc));
}

/**
 * mpi3mr_eh_host_reset - Host reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issues a controller reset if the scmd is for a physical
 * device. If the scmd is for a RAID volume, waits for
 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checks whether any I/Os
 * are pending prior to issuing a reset to the controller.
 *
 * Return: SUCCESS on successful reset, else FAILED
 */
static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
	int retval = FAILED, ret;

	sdev_priv_data = scmd->device->hostdata;
	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
		stgt_priv_data = sdev_priv_data->tgt_priv_data;
		dev_type = stgt_priv_data->dev_type;
	}

	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
		mpi3mr_wait_for_host_io(mrioc,
		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
			retval = SUCCESS;
			goto out;
		}
	}

	mpi3mr_print_pending_host_io(mrioc);
	ret = mpi3mr_soft_reset_handler(mrioc,
	    MPI3MR_RESET_FROM_EH_HOS, 1);
	if (ret)
		goto out;

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "Host reset is %s for scmd(%p)\n",
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_eh_target_reset - Target reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issues a target reset Task Management request, verifies the
 * scmd is terminated successfully, and returns status
 * accordingly.
 *
 * Return: SUCCESS on successful termination of the scmd, else
 * FAILED
 */
static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Target Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
		    mrioc->name, dev_handle);
		retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Target Reset is issued to handle(0x%04x)\n",
	    dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	if (stgt_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: target has %d pending commands, target reset is failed\n",
		    mrioc->name, stgt_priv_data->pend_count);
		goto out;
	}

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_eh_dev_reset - Device reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issues a lun reset Task Management request, verifies the scmd
 * is terminated successfully, and returns status accordingly.
 *
 * Return: SUCCESS on successful termination of the scmd, else
 * FAILED
 */
static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
		    mrioc->name, dev_handle);
		retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	if (sdev_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: device has %d pending commands, device(LUN) reset is failed\n",
		    mrioc->name, sdev_priv_data->pend_count);
		goto out;
	}
	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_scan_start - Scan start callback handler
 * @shost: SCSI host reference
 *
 * Issue port enable request asynchronously.
 *
 * Return: Nothing
 */
static void mpi3mr_scan_start(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	mrioc->scan_started = 1;
	ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
	if (mpi3mr_issue_port_enable(mrioc, 1)) {
		ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
		mrioc->scan_started = 0;
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
	}
}

/**
 * mpi3mr_scan_finished - Scan finished callback handler
 * @shost: SCSI host reference
 * @time: Jiffies from the scan start
 *
 * Checks whether the port enable is completed, timed out or
 * failed, and sets the scan status accordingly after taking any
 * required recovery.
 *
 * Return: 1 on scan finished or timed out, 0 for in progress
 */
static int mpi3mr_scan_finished(struct Scsi_Host *shost,
	unsigned long time)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
	u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
		ioc_err(mrioc, "port enable failed due to fault or reset\n");
		mpi3mr_print_fault_info(mrioc);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	if (time >= (pe_timeout * HZ)) {
		ioc_err(mrioc, "port enable failed due to time out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	if (mrioc->scan_started)
		return 0;

	if (mrioc->scan_failed) {
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable is successfully completed\n");

	mpi3mr_start_watchdog(mrioc);
	mrioc->is_driver_loading = 0;
	mrioc->stop_bsgs = 0;
	return 1;
}

/**
 * mpi3mr_slave_destroy - Slave destroy callback handler
 * @sdev: SCSI device reference
 *
 * Cleanup and free per device(lun) private data.
 *
 * Return: Nothing.
 */
static void mpi3mr_slave_destroy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev = NULL;
	unsigned long flags;
	struct scsi_target *starget;
	struct sas_rphy *rphy = NULL;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	scsi_tgt_priv_data->num_luns--;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (starget->channel == mrioc->scsi_device_channel)
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
	}

	if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
		tgt_dev->starget = NULL;
	if (tgt_dev)
		mpi3mr_tgtdev_put(tgt_dev);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}

/**
 * mpi3mr_target_destroy - Target destroy callback handler
 * @starget: SCSI target reference
 *
 * Cleanup and free per target private data.
 *
 * Return: Nothing.
 */
static void mpi3mr_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;

	if (!starget->hostdata)
		return;

	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
	if (tgt_dev && (tgt_dev->starget == starget) &&
	    (tgt_dev->perst_id == starget->id))
		tgt_dev->starget = NULL;
	if (tgt_dev) {
		scsi_tgt_priv_data->tgt_dev = NULL;
		scsi_tgt_priv_data->perst_id = 0;
		mpi3mr_tgtdev_put(tgt_dev);
		mpi3mr_tgtdev_put(tgt_dev);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(starget->hostdata);
	starget->hostdata = NULL;
}

/**
 * mpi3mr_slave_configure - Slave configure callback handler
 * @sdev: SCSI device reference
 *
 * Configure queue depth, max hardware sectors and virt boundary
 * as required.
 *
 * Return: 0 always.
 */
static int mpi3mr_slave_configure(struct scsi_device *sdev)
{
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_tgt_dev *tgt_dev = NULL;
	unsigned long flags;
	int retval = 0;
	struct sas_rphy *rphy = NULL;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (starget->channel == mrioc->scsi_device_channel)
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	if (!tgt_dev)
		return -ENXIO;

	mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);

	sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
	blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);

	switch (tgt_dev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* The block layer hw sector size = 512 */
		if ((tgt_dev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgt_dev->dev_spec.pcie_inf.mdts / 512);
			if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}

	mpi3mr_tgtdev_put(tgt_dev);

	return retval;
}

/**
 * mpi3mr_slave_alloc - Slave alloc callback handler
 * @sdev: SCSI device reference
 *
 * Allocate per device(lun) private data and initialize it.
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure.
 */
static int mpi3mr_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev = NULL;
	struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
	unsigned long flags;
	struct scsi_target *starget;
	int retval = 0;
	struct sas_rphy *rphy = NULL;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);

	if (starget->channel == mrioc->scsi_device_channel)
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
	}

	if (tgt_dev) {
		if (tgt_dev->starget == NULL)
			tgt_dev->starget = starget;
		mpi3mr_tgtdev_put(tgt_dev);
		retval = 0;
	} else {
		spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
		return -ENXIO;
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
	if (!scsi_dev_priv_data)
		return -ENOMEM;

	scsi_dev_priv_data->lun_id = sdev->lun;
	scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
	sdev->hostdata = scsi_dev_priv_data;

	scsi_tgt_priv_data->num_luns++;

	return retval;
}

/**
 * mpi3mr_target_alloc - Target alloc callback handler
 * @starget: SCSI target reference
 *
 * Allocate per target private data and initialize it.
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure.
 */
static int mpi3mr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	int retval = 0;
	struct sas_rphy *rphy = NULL;
	bool update_stgt_priv_data = false;

	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
	if (!scsi_tgt_priv_data)
		return -ENOMEM;

	starget->hostdata = scsi_tgt_priv_data;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
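
	/*
	 * Two lookup paths: targets on the driver's own SCSI channel are
	 * matched by persistent ID, while channel-0 targets discovered
	 * through the SAS transport layer are matched by SAS address and
	 * rphy.
	 */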
	if (starget->channel == mrioc->scsi_device_channel) {
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
		if (tgt_dev && !tgt_dev->is_hidden)
			update_stgt_priv_data = true;
		else
			retval = -ENXIO;
	} else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
			update_stgt_priv_data = true;
		else
			retval = -ENXIO;
	}

	if (update_stgt_priv_data) {
		scsi_tgt_priv_data->starget = starget;
		scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
		scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
		scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
		scsi_tgt_priv_data->tgt_dev = tgt_dev;
		tgt_dev->starget = starget;
		atomic_set(&scsi_tgt_priv_data->block_io, 0);
		retval = 0;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgt_dev->io_throttle_enabled;
		if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
			scsi_tgt_priv_data->throttle_group =
			    tgt_dev->dev_spec.vd_inf.tg;
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	return retval;
}
/**
 * mpi3mr_check_return_unmap - Whether an unmap is allowed
 * @mrioc: Adapter instance reference
 * @scmd: SCSI Command reference
 *
 * The controller hardware cannot handle certain unmap commands
 * for NVMe drives; this routine checks for those, completes the
 * SCSI command with proper status and sense data, and returns
 * true.
 *
 * Return: TRUE for a disallowed unmap, FALSE otherwise.
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len, trunc_param_len;

	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
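
	/*
	 * A non-zero PCI revision (presumably post-A0 silicon) only needs
	 * oversized parameter lists trimmed to a 16-byte descriptor
	 * boundary; revision-0 parts get the full validation below.
	 */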
	if (mrioc->pdev->revision) {
		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
			trunc_param_len -= (param_len - 8) & 0xF;
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
			dprint_scsi_err(mrioc,
			    "truncating param_len from (%d) to (%d)\n",
			    param_len, trunc_param_len);
			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
		}
		return false;
	}

	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scsi_done(scmd);
		return true;
	}

	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}

	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}

	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scsi_done(scmd);
		return true;
	}
	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	desc_len = get_unaligned_be16(&buf[2]);

	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scsi_done(scmd);
		kfree(buf);
		return true;
	}

	if (param_len > (desc_len + 8)) {
		trunc_param_len = desc_len + 8;
		scsi_print_command(scmd);
		dprint_scsi_err(mrioc,
		    "truncating param_len(%d) to desc_len+8(%d)\n",
		    param_len, trunc_param_len);
		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}
/**
 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
 * @scmd: SCSI Command reference
 *
 * Checks whether a CDB is allowed during shutdown.
 *
 * Return: TRUE for allowed commands, FALSE otherwise.
 */
inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
{
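	/*
	 * Cache flushes and start/stop-unit requests are still let through
	 * while the driver is shutting down so that drive write caches are
	 * flushed and media is stopped cleanly.
	 */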
	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
	case START_STOP:
		return true;
	default:
		return false;
	}
}
/**
 * mpi3mr_qcmd - I/O request dispatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
 *
 * Return: 0 on successful queueing of the request or if the
 * request is completed with failure.
 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
 */
static int mpi3mr_qcmd(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *scmd_priv_data = NULL;
	struct mpi3_scsi_io_request *scsiio_req = NULL;
	struct op_req_qinfo *op_req_q = NULL;
	int retval = 0;
	u16 dev_handle;
	u16 host_tag;
	u32 scsiio_flags = 0, data_len_blks = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int iprio_class;
	u8 is_pcie_dev = 0;
	u32 tracked_io_sz = 0;
	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;

	if (mrioc->unrecoverable) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	if (mrioc->stop_drv_processing &&
	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	if (mrioc->reset_in_progress) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;

	if (atomic_read(&stgt_priv_data->block_io)) {
		if (mrioc->stop_drv_processing) {
			scmd->result = DID_NO_CONNECT << 16;
			scsi_done(scmd);
			goto out;
		}
		retval = SCSI_MLQUEUE_DEVICE_BUSY;
		goto out;
	}

	dev_handle = stgt_priv_data->dev_handle;
	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}
	if (stgt_priv_data->dev_removed) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
		is_pcie_dev = 1;
	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    mpi3mr_check_return_unmap(mrioc, scmd))
		goto out;
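
	/*
	 * Every outstanding MPI3 request needs a driver-unique host tag,
	 * derived here from the block layer request tag; an invalid tag
	 * means the command cannot be tracked and is failed outright.
	 */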
	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	else
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;

	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

	if (sdev_priv_data->ncq_prio_enable) {
		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (iprio_class == IOPRIO_CLASS_RT)
			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
	}

	if (scmd->cmd_len > 16)
		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	scmd_priv_data = scsi_cmd_priv(scmd);
	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
	scsiio_req->host_tag = cpu_to_le16(host_tag);

	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);

	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
	scsiio_req->flags = cpu_to_le32(scsiio_flags);
	int_to_scsilun(sdev_priv_data->lun_id,
	    (struct scsi_lun *)scsiio_req->lun);

	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
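
	/*
	 * Large-I/O throttling: transfers at or above the controller's
	 * io_throttle_data_length (counted in 512-byte blocks) are added to
	 * per-controller and, for VDs, per-throttle-group pending counters;
	 * crossing the high watermark diverts subsequent I/O to firmware
	 * and triggers queue-depth reduction for the group.
	 */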
	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
	data_len_blks = scsi_bufflen(scmd) >> 9;
	if ((data_len_blks >= mrioc->io_throttle_data_length) &&
	    stgt_priv_data->io_throttle_enabled) {
		tracked_io_sz = data_len_blks;
		tg = stgt_priv_data->throttle_group;
		if (tg) {
			ioc_pend_data_len = atomic_add_return(data_len_blks,
			    &mrioc->pend_large_data_sz);
			tg_pend_data_len = atomic_add_return(data_len_blks,
			    &tg->pend_large_data_sz);
			if (!tg->io_divert && ((ioc_pend_data_len >=
			    mrioc->io_throttle_high) ||
			    (tg_pend_data_len >= tg->high))) {
				tg->io_divert = 1;
				tg->need_qd_reduction = 1;
				mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
				    tg, 1);
				mpi3mr_queue_qd_reduction_event(mrioc, tg);
			}
		} else {
			ioc_pend_data_len = atomic_add_return(data_len_blks,
			    &mrioc->pend_large_data_sz);
			if (ioc_pend_data_len >= mrioc->io_throttle_high)
				stgt_priv_data->io_divert = 1;
		}
	}

	if (stgt_priv_data->io_divert) {
		scsiio_req->msg_flags |=
		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
		scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
	}
	scsiio_req->flags = cpu_to_le32(scsiio_flags);

	if (mpi3mr_op_request_post(mrioc, op_req_q,
	    scmd_priv_data->mpi3mr_scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		if (tracked_io_sz) {
			atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
			if (tg)
				atomic_sub(tracked_io_sz,
				    &tg->pend_large_data_sz);
		}
		goto out;
	}

out:
	return retval;
}
static struct scsi_host_template mpi3mr_driver_template = {
	.module				= THIS_MODULE,
	.name				= "MPI3 Storage Controller",
	.proc_name			= MPI3MR_DRIVER_NAME,
	.queuecommand			= mpi3mr_qcmd,
	.target_alloc			= mpi3mr_target_alloc,
	.slave_alloc			= mpi3mr_slave_alloc,
	.slave_configure		= mpi3mr_slave_configure,
	.target_destroy			= mpi3mr_target_destroy,
	.slave_destroy			= mpi3mr_slave_destroy,
	.scan_finished			= mpi3mr_scan_finished,
	.scan_start			= mpi3mr_scan_start,
	.change_queue_depth		= mpi3mr_change_queue_depth,
	.eh_device_reset_handler	= mpi3mr_eh_dev_reset,
	.eh_target_reset_handler	= mpi3mr_eh_target_reset,
	.eh_host_reset_handler		= mpi3mr_eh_host_reset,
	.bios_param			= mpi3mr_bios_param,
	.map_queues			= mpi3mr_map_queues,
	.mq_poll			= mpi3mr_blk_mq_poll,
	.no_write_same			= 1,
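	/*
	 * can_queue is only a placeholder here; mpi3mr_probe() raises it to
	 * the controller's real max_host_ios once the IOC facts are known.
	 */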
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPI3MR_SG_DEPTH,
	/* max xfer supported is 1M (2048 sectors of 512 bytes) */
	.max_sectors			= 2048,
	.cmd_per_lun			= MPI3MR_MAX_CMDS_LUN,
	.max_segment_size		= 0xffffffff,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scmd_priv),
	.shost_groups			= mpi3mr_host_groups,
	.sdev_groups			= mpi3mr_dev_groups,
};
/**
 * mpi3mr_init_drv_cmd - Initialize internal command tracker
 * @cmdptr: Internal command tracker
 * @host_tag: Host tag used for the specific command
 *
 * Initialize the internal command tracker structure with
 * specified host tag.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
	u16 host_tag)
{
	mutex_init(&cmdptr->mutex);
	cmdptr->reply = NULL;
	cmdptr->state = MPI3MR_CMD_NOTUSED;
	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	cmdptr->host_tag = host_tag;
}
/**
 * osintfc_mrioc_security_status - Check controller secure status
 * @pdev: PCI device instance
 *
 * Read the Device Serial Number capability from PCI config
 * space and decide whether the controller is secure or not.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
osintfc_mrioc_security_status(struct pci_dev *pdev)
{
	u32 cap_data;
	int base;
	u32 ctlr_status;
	u32 debug_status;
	int retval = 0;

	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!base) {
		dev_err(&pdev->dev,
		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
		return -1;
	}

	pci_read_config_dword(pdev, base + 4, &cap_data);

	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
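
	/*
	 * The controller repurposes bits of the DSN extended capability to
	 * report its security personality: invalid and tampered parts are
	 * rejected, and any otherwise-acceptable part with secure debug
	 * enabled is also treated as non-secure.
	 */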
	switch (ctlr_status) {
	case MPI3MR_INVALID_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	case MPI3MR_CONFIG_SECURE_DEVICE:
		if (!debug_status)
			dev_info(&pdev->dev,
			    "%s: Config secure ctlr is detected\n",
			    __func__);
		break;
	case MPI3MR_HARD_SECURE_DEVICE:
		break;
	case MPI3MR_TAMPERED_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	if (!retval && debug_status) {
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
	}

	return retval;
}
/**
 * mpi3mr_probe - PCI probe callback
 * @pdev: PCI device instance
 * @id: PCI device ID details
 *
 * Controller initialization routine. Checks the security status
 * of the controller and, if it is invalid or tampered, returns
 * without initializing the controller. Otherwise, allocates the
 * per-adapter instance through shost_priv, initializes
 * controller-specific data structures and the controller
 * hardware, and adds the shost to the SCSI subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	mrioc->id = mrioc_ids++;
	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);
	spin_lock_init(&mrioc->sas_node_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
	INIT_LIST_HEAD(&mrioc->sas_expander_list);
	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
	INIT_LIST_HEAD(&mrioc->enclosure_list);

	mutex_init(&mrioc->reset_mutex);
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
		    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
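
	/*
	 * Segmented operational queues are enabled only on non-zero PCI
	 * revisions, matching the revision check in the unmap handling
	 * (presumably a workaround for first-cut silicon).
	 */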
	if (pdev->revision)
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;
	mrioc->stop_bsgs = 1;

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;
	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;
	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    mrioc->fwevt_worker_name, 0);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}
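
	/*
	 * With poll queues active, three blk-mq queue maps are exposed
	 * (default, read and poll); queue depth and target count come from
	 * the IOC facts read during initialization.
	 */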
	shost->nr_hw_queues = mrioc->num_op_reply_q;
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = MPI3MR_SG_DEPTH;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
	scsi_host_put(shost);
shost_failed:
	return retval;
}
/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_hba_port *port, *hba_port_next;
	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}

	mpi3mr_bsg_exit(mrioc);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	if (mrioc->sas_transport_enabled)
		sas_remove_host(shost);
	else
		scsi_remove_host(shost);
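
	/*
	 * The firmware event worker is torn down before the host so that no
	 * new device add/remove work can race with the target list walk
	 * below; each remaining target is removed from the host and its
	 * reference dropped.
	 */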
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
	    &mrioc->sas_expander_list, list) {
		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
		mpi3mr_expander_node_remove(mrioc, sas_expander);
		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	}
	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
		ioc_info(mrioc,
		    "removing hba_port entry: %p port: %d from hba_port list\n",
		    port, port->port_id);
		list_del(&port->list);
		kfree(port);
	}
	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

	if (mrioc->sas_hba.num_phys) {
		kfree(mrioc->sas_hba.phy);
		mrioc->sas_hba.phy = NULL;
		mrioc->sas_hba.num_phys = 0;
	}

	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}
/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller.
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}
/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @dev: Device struct
 *
 * Change the power state to the given value and clean up the IOC
 * by issuing MUR and shutdown notification.
 *
 * Return: 0 always.
 */
static int __maybe_unused
mpi3mr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
	    pdev, pci_name(pdev));
	mpi3mr_cleanup_resources(mrioc);

	return 0;
}
/**
 * mpi3mr_resume - PCI power management resume callback
 * @dev: Device struct
 *
 * Restore the power state to D0, reinitialize the controller
 * and resume I/O operations to the target devices.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __maybe_unused
mpi3mr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}
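
	/*
	 * Re-initialization path: stale device handles are invalidated and
	 * cached buffers wiped before bringing the IOC back up (the final
	 * argument to mpi3mr_reinit_ioc() appears to flag the resume case),
	 * then the topology is given time to settle before I/O resumes.
	 */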
	mrioc->stop_drv_processing = 0;
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
	scsi_unblock_requests(shost);
	mrioc->device_refresh_on = 0;
	mpi3mr_start_watchdog(mrioc);

	return 0;
}
static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);

static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);

static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
	.driver.pm = &mpi3mr_pm_ops,
};

static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);

static int __init mpi3mr_init(void)
{
	int ret_val;

	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
	    MPI3MR_DRIVER_VERSION);

	mpi3mr_transport_template =
	    sas_attach_transport(&mpi3mr_transport_functions);
	if (!mpi3mr_transport_template) {
		pr_err("%s failed to load due to sas transport attach failure\n",
		    MPI3MR_DRIVER_NAME);
		return -ENODEV;
	}

	ret_val = pci_register_driver(&mpi3mr_pci_driver);
	if (ret_val) {
		pr_err("%s failed to load due to pci register driver failure\n",
		    MPI3MR_DRIVER_NAME);
		goto err_pci_reg_fail;
	}

	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	if (ret_val)
		goto err_event_counter;

	return ret_val;

err_event_counter:
	pci_unregister_driver(&mpi3mr_pci_driver);
err_pci_reg_fail:
	sas_release_transport(mpi3mr_transport_template);
	return ret_val;
}

static void __exit mpi3mr_exit(void)
{
	if (warn_non_secure_ctlr)
		pr_warn(
		    "Unloading %s version %s while managing a non secure controller\n",
		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
	else
		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
		    MPI3MR_DRIVER_VERSION);

	driver_remove_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	pci_unregister_driver(&mpi3mr_pci_driver);
	sas_release_transport(mpi3mr_transport_template);
}

module_init(mpi3mr_init);
module_exit(mpi3mr_exit);