qla_os.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
#include <linux/trace.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

static struct trace_array *qla_trc_array;

int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
        "Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
        "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
        "1: track usage per queue, 2: track usage per adapter");

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;

/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

int ql2xsecenable;
module_param(ql2xsecenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xsecenable,
        "Enable/disable security. 0 (Default) - Security disabled. 1 - Security enabled.");

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
        "Specify if Class 2 operations are supported from the very "
        "beginning. Default is 0 - class 2 not supported.");

int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
        "Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
        "Maximum number of command retries to a port that returns "
        "a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
        "Option to enable PLOGI to devices that are not present after "
        "a Fabric scan. This is needed for several broken switches. "
        "Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
        "Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
        "Option to enable allocation of memory for a firmware dump "
        "during HBA initialization. Memory allocation requirements "
        "vary by ISP type. Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
        "Option to enable extended error logging,\n"
        "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
        "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
        "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
        "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
        "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
        "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
        "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
        "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
        "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
        "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
        "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
        "\t\t0x1e400000 - Preferred value for capturing essential "
        "debug information (equivalent to old "
        "ql2xextended_error_logging=1).\n"
        "\t\tDo LOGICAL OR of the value to enable more than one level");

int ql2xextended_error_logging_ktrace = 1;
module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
        "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
        "Set to control shifting of command type processing "
        "based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
        "Enables FDMI registrations. "
        "0 - no FDMI registrations. "
        "1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH	64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
        "Maximum queue depth to set for each LUN. "
        "Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
        " Enable T10-CRC-DIF:\n"
        " Default is 2.\n"
        "  0 -- No DIF Support\n"
        "  1 -- Enable DIF for all types\n"
        "  2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
        "Enables NVME support. "
        "0 - no NVMe. Default is Y");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
        " Enable T10-CRC-DIF Error isolation by HBA:\n"
        " Default is 2.\n"
        "  0 -- Error isolation disabled\n"
        "  1 -- Error isolation enabled only for DIX Type 0\n"
        "  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
        "Enables iIDMA settings. "
        "Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
        "Enable on demand multiple queue pairs support. "
        "Default is 1 for supported. "
        "Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
        "Option to specify location from which to load ISP firmware:\n"
        " 2 -- load firmware via the request_firmware() (hotplug)\n"
        "      interface.\n"
        " 1 -- load firmware from flash.\n"
        " 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
        "Enables firmware ETS burst. "
        "Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
        "Option to specify scheme for request queue posting.\n"
        " 0 -- Regular doorbell.\n"
        " 1 -- CAMRAM doorbell (faster).\n");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
        "Enables GFF_ID checks of port type. "
        "Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
        "Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
        "Default is 1 - Issue TM IOCBs via IOCB mechanism. "
        "0 - Issue TM IOCBs via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
        "Option to specify reset behaviour.\n"
        " 0 (Default) -- Reset on failure.\n"
        " 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
        "Defines the maximum LU number to register with the SCSI "
        "midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
        "Set the Minidump driver capture mask level. "
        "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
        "Enable/disable MiniDump. "
        "0 - MiniDump disabled. "
        "1 (Default) - MiniDump enabled.");

int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
        "Number of extended Logins. "
        "0 (Default) - Disabled.");

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
        "Number of target exchanges.");

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
        "Number of initiator exchanges.");

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
        "Allow FW to hold status IOCB until ABTS rsp received. "
        "0 (Default) - Do not set fw option. "
        "1 - Set fw option to hold ABTS.");

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
        "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ. "
        "0 (Default) - Do not move IOCBs. "
        "1 - Move IOCBs.");

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
        "Detect SFP range and set appropriate distance.\n"
        "1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
        "Set to enable MSI or MSI-X interrupt mechanism.\n"
        " Default is 1, enable MSI-X interrupt mechanism.\n"
        " 0 -- enable traditional pin-based mechanism.\n"
        " 1 -- enable MSI-X interrupt mechanism.\n"
        " 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
        "Reserve 1/2 of emergency exchanges for ELS.\n"
        " 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
        "Override DIF/DIX protection capabilities mask.\n"
        "Default is 0, which sets the protection mask based on "
        "capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
        " 0 -- Let HBA firmware decide\n"
        " 1 -- Force T10 CRC\n"
        " 2 -- Force IP checksum\n");

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
        "Force using internal buffers for DIF information.\n"
        "0 (Default) - Based on check.\n"
        "1 - Force using internal buffers.\n");

int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
        "Send SmartSAN Management Attributes for FDMI Registration."
        " Default is 0 - No SmartSAN registration,"
        " 1 - Register SmartSAN Management Attributes.");

int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
        "Enables RDP responses. "
        "0 - no RDP responses (default). "
        "1 - provide RDP responses.");

int ql2xabts_wait_nvme = 1;
module_param(ql2xabts_wait_nvme, int, 0444);
MODULE_PARM_DESC(ql2xabts_wait_nvme,
        "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");

static u32 ql2xdelay_before_pci_error_handling = 5;
module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
        "Number of seconds delayed before qla begins PCI error self-handling (default: 5).\n");

static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);

u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
module_param(ql2xnvme_queues, uint, S_IRUGO);
MODULE_PARM_DESC(ql2xnvme_queues,
        "Number of NVMe Queues that can be configured.\n"
        "Final value will be min(ql2xnvme_queues, num_cpus, num_chip_queues)\n"
        "1 - Minimum number of queues supported\n"
        "8 - Default value");

int ql2xfc2target = 1;
module_param(ql2xfc2target, int, 0444);
MODULE_PARM_DESC(ql2xfc2target,
        "Enables FC2 Target support. "
        "0 - FC2 Target support is disabled. "
        "1 - FC2 Target support is enabled (default).");

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
        timer_setup(&vha->timer, qla2x00_timer, 0);
        vha->timer.expires = jiffies + interval * HZ;
        add_timer(&vha->timer);
        vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
        /* Currently used for 82XX only. */
        if (vha->device_flags & DFLG_DEV_FAILED) {
                ql_dbg(ql_dbg_timer, vha, 0x600d,
                    "Device in a failed state, returning.\n");
                return;
        }

        mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
        del_timer_sync(&vha->timer);
        vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
        struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
        struct qla_qpair *qpair);

/* -------------------------------------------------------------------------- */
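
/*
 * Point the base (hardware) queue pair at request/response queue zero
 * and seed its lock pointer, MSI-X vector, SRB mempool and initial CPU
 * affinity; 83xx/27xx/28xx parts get their own IOCB-start handler.
 */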
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;

        rsp->qpair = ha->base_qpair;
        rsp->req = req;
        ha->base_qpair->hw = ha;
        ha->base_qpair->req = req;
        ha->base_qpair->rsp = rsp;
        ha->base_qpair->vha = vha;
        ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
        ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
        ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
        ha->base_qpair->srb_mempool = ha->srb_mempool;
        INIT_LIST_HEAD(&ha->base_qpair->hints_list);
        ha->base_qpair->enable_class_2 = ql2xenableclass2;
        /* init qpair to this cpu. Will adjust at run time. */
        qla_cpu_update(rsp->qpair, raw_smp_processor_id());
        ha->base_qpair->pdev = ha->pdev;

        if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
                ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}
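
/*
 * Allocate the request/response queue pointer maps and the base queue
 * pair, plus the qpair map when MQ or NVMe is enabled. Queue zero is
 * recorded in the maps so a partially failed probe can still free it.
 * Returns 0 on success or -ENOMEM with all partial allocations unwound.
 */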
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
    struct rsp_que *rsp)
{
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

        ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
            GFP_KERNEL);
        if (!ha->req_q_map) {
                ql_log(ql_log_fatal, vha, 0x003b,
                    "Unable to allocate memory for request queue ptrs.\n");
                goto fail_req_map;
        }

        ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
            GFP_KERNEL);
        if (!ha->rsp_q_map) {
                ql_log(ql_log_fatal, vha, 0x003c,
                    "Unable to allocate memory for response queue ptrs.\n");
                goto fail_rsp_map;
        }

        ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
        if (ha->base_qpair == NULL) {
                ql_log(ql_log_warn, vha, 0x00e0,
                    "Failed to allocate base queue pair memory.\n");
                goto fail_base_qpair;
        }

        qla_init_base_qpair(vha, req, rsp);

        if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
                ha->queue_pair_map = kcalloc(ha->max_qpairs,
                    sizeof(struct qla_qpair *), GFP_KERNEL);
                if (!ha->queue_pair_map) {
                        ql_log(ql_log_fatal, vha, 0x0180,
                            "Unable to allocate memory for queue pair ptrs.\n");
                        goto fail_qpair_map;
                }
                if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
                        kfree(ha->queue_pair_map);
                        ha->queue_pair_map = NULL;
                        goto fail_qpair_map;
                }
        }

        /*
         * Make sure we record at least the request and response queue zero in
         * case we need to free them if part of the probe fails.
         */
        ha->rsp_q_map[0] = rsp;
        ha->req_q_map[0] = req;
        set_bit(0, ha->rsp_qid_map);
        set_bit(0, ha->req_qid_map);
        return 0;

fail_qpair_map:
        kfree(ha->base_qpair);
        ha->base_qpair = NULL;
fail_base_qpair:
        kfree(ha->rsp_q_map);
        ha->rsp_q_map = NULL;
fail_rsp_map:
        kfree(ha->req_q_map);
        ha->req_q_map = NULL;
fail_req_map:
        return -ENOMEM;
}
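
/*
 * The two helpers below free a request or response queue's DMA ring
 * (ISPFX00 adapters keep theirs in the *_fx00 fields), the request
 * queue's outstanding-command array, and the queue structure itself.
 */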
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
        if (IS_QLAFX00(ha)) {
                if (req && req->ring_fx00)
                        dma_free_coherent(&ha->pdev->dev,
                            (req->length_fx00 + 1) * sizeof(request_t),
                            req->ring_fx00, req->dma_fx00);
        } else if (req && req->ring)
                dma_free_coherent(&ha->pdev->dev,
                    (req->length + 1) * sizeof(request_t),
                    req->ring, req->dma);

        if (req)
                kfree(req->outstanding_cmds);

        kfree(req);
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
        if (IS_QLAFX00(ha)) {
                if (rsp && rsp->ring_fx00)
                        dma_free_coherent(&ha->pdev->dev,
                            (rsp->length_fx00 + 1) * sizeof(request_t),
                            rsp->ring_fx00, rsp->dma_fx00);
        } else if (rsp && rsp->ring) {
                dma_free_coherent(&ha->pdev->dev,
                    (rsp->length + 1) * sizeof(response_t),
                    rsp->ring, rsp->dma);
        }

        kfree(rsp);
}
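
/*
 * Walk the request/response qid maps and free every recorded queue.
 * The hardware lock is dropped around each qla2x00_free_*_que() call so
 * the coherent DMA rings are not released under the spinlock.
 */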
static void qla2x00_free_queues(struct qla_hw_data *ha)
{
        struct req_que *req;
        struct rsp_que *rsp;
        int cnt;
        unsigned long flags;

        if (ha->queue_pair_map) {
                kfree(ha->queue_pair_map);
                ha->queue_pair_map = NULL;
        }
        if (ha->base_qpair) {
                kfree(ha->base_qpair);
                ha->base_qpair = NULL;
        }

        qla_mapq_free_qp_cpu_map(ha);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
                if (!test_bit(cnt, ha->req_qid_map))
                        continue;

                req = ha->req_q_map[cnt];
                clear_bit(cnt, ha->req_qid_map);
                ha->req_q_map[cnt] = NULL;

                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_req_que(ha, req);
                spin_lock_irqsave(&ha->hardware_lock, flags);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        kfree(ha->req_q_map);
        ha->req_q_map = NULL;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
                if (!test_bit(cnt, ha->rsp_qid_map))
                        continue;

                rsp = ha->rsp_q_map[cnt];
                clear_bit(cnt, ha->rsp_qid_map);
                ha->rsp_q_map[cnt] = NULL;

                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_rsp_que(ha, rsp);
                spin_lock_irqsave(&ha->hardware_lock, flags);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        kfree(ha->rsp_q_map);
        ha->rsp_q_map = NULL;
}
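
/*
 * The next two helpers format a human-readable description of the bus
 * the adapter sits on: plain PCI/PCI-X speeds for ISP2xxx parts, and
 * PCIe link speed/width (from the link capability register) for the
 * 24xx-and-later parts.
 */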
static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
        struct qla_hw_data *ha = vha->hw;
        static const char *const pci_bus_modes[] = {
                "33", "66", "100", "133",
        };
        uint16_t pci_bus;

        pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
        if (pci_bus) {
                snprintf(str, str_len, "PCI-X (%s MHz)",
                    pci_bus_modes[pci_bus]);
        } else {
                pci_bus = (ha->pci_attr & BIT_8) >> 8;
                snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
        }

        return str;
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
        static const char *const pci_bus_modes[] = {
                "33", "66", "100", "133",
        };
        struct qla_hw_data *ha = vha->hw;
        uint32_t pci_bus;

        if (pci_is_pcie(ha->pdev)) {
                uint32_t lstat, lspeed, lwidth;
                const char *speed_str;

                pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
                lspeed = lstat & PCI_EXP_LNKCAP_SLS;
                lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

                switch (lspeed) {
                case 1:
                        speed_str = "2.5GT/s";
                        break;
                case 2:
                        speed_str = "5.0GT/s";
                        break;
                case 3:
                        speed_str = "8.0GT/s";
                        break;
                case 4:
                        speed_str = "16.0GT/s";
                        break;
                default:
                        speed_str = "<unknown>";
                        break;
                }
                snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
                return str;
        }

        pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
        if (pci_bus == 0 || pci_bus == 8)
                snprintf(str, str_len, "PCI (%s MHz)",
                    pci_bus_modes[pci_bus >> 3]);
        else
                snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
                    pci_bus & 4 ? 2 : 1,
                    pci_bus_modes[pci_bus & 3]);

        return str;
}
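
/*
 * Firmware version string builders. The ISP2xxx variant folds the
 * firmware attribute bits into a short mnemonic suffix; the 24xx
 * variant simply appends the raw attribute word in hex.
 */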
static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
        char un_str[10];
        struct qla_hw_data *ha = vha->hw;

        snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
            ha->fw_minor_version, ha->fw_subminor_version);

        if (ha->fw_attributes & BIT_9) {
                strcat(str, "FLX");
                return (str);
        }

        switch (ha->fw_attributes & 0xFF) {
        case 0x7:
                strcat(str, "EF");
                break;
        case 0x17:
                strcat(str, "TP");
                break;
        case 0x37:
                strcat(str, "IP");
                break;
        case 0x77:
                strcat(str, "VI");
                break;
        default:
                sprintf(un_str, "(%x)", ha->fw_attributes);
                strcat(str, un_str);
                break;
        }
        if (ha->fw_attributes & 0x100)
                strcat(str, "X");

        return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
        struct qla_hw_data *ha = vha->hw;

        snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
            ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
        return str;
}
void qla2x00_sp_free_dma(srb_t *sp)
{
    struct qla_hw_data *ha = sp->vha->hw;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);

    if (sp->flags & SRB_DMA_VALID) {
        scsi_dma_unmap(cmd);
        sp->flags &= ~SRB_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
        dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
        /* The list is guaranteed to be non-empty. */
        qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
        sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
        struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

        dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
        sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
    }

    if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
        struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
            ctx1->fcp_cmnd_dma);
        list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
        ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
        ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
        mempool_free(ctx1, ha->ctx_mempool);
    }
}
void qla2x00_sp_compl(srb_t *sp, int res)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct completion *comp = sp->comp;

    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
    cmd->result = res;
    sp->type = 0;
    scsi_done(cmd);
    if (comp)
        complete(comp);
}
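
/*
 * A minimal sketch of the reference-counting convention used by the
 * completion paths above: an "INIT" reference is taken on the SRB at
 * submission time and dropped here with kref_put() before the command
 * is handed back to the midlayer. The names below are hypothetical.
 */
#if 0 /* illustrative sketch only -- not compiled */
static void example_sp_release(struct kref *kref)
{
    /* Last reference gone: free per-command DMA resources here. */
}

static void example_sp_compl(srb_t *sp)
{
    kref_put(&sp->cmd_kref, example_sp_release); /* drop INIT ref */
}
#endif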
void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct qla_hw_data *ha = sp->fcport->vha->hw;

    if (sp->flags & SRB_DMA_VALID) {
        scsi_dma_unmap(cmd);
        sp->flags &= ~SRB_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
        dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
        /* The list is guaranteed to be non-empty. */
        qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
        sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
    }

    if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
        struct crc_context *difctx = sp->u.scmd.crc_ctx;
        struct dsd_dma *dif_dsd, *nxt_dsd;

        list_for_each_entry_safe(dif_dsd, nxt_dsd,
            &difctx->ldif_dma_hndl_list, list) {
            list_del(&dif_dsd->list);
            dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
                dif_dsd->dsd_list_dma);
            kfree(dif_dsd);
            difctx->no_dif_bundl--;
        }

        list_for_each_entry_safe(dif_dsd, nxt_dsd,
            &difctx->ldif_dsd_list, list) {
            list_del(&dif_dsd->list);
            dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
                dif_dsd->dsd_list_dma);
            kfree(dif_dsd);
            difctx->no_ldif_dsd--;
        }

        if (difctx->no_ldif_dsd) {
            ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
                "%s: difctx->no_ldif_dsd=%x\n",
                __func__, difctx->no_ldif_dsd);
        }

        if (difctx->no_dif_bundl) {
            ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
                "%s: difctx->no_dif_bundl=%x\n",
                __func__, difctx->no_dif_bundl);
        }
        sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
    }

    if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
        struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
            ctx1->fcp_cmnd_dma);
        list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
        ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
        ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
        mempool_free(ctx1, ha->ctx_mempool);
        sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
        struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

        dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
        sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
    }
}
void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct completion *comp = sp->comp;

    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
    cmd->result = res;
    sp->type = 0;
    scsi_done(cmd);
    if (comp)
        complete(comp);
}
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
    scsi_qla_host_t *vha = shost_priv(host);
    fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
    struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    srb_t *sp;
    int rval;

    if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
        WARN_ON_ONCE(!rport)) {
        cmd->result = DID_NO_CONNECT << 16;
        goto qc24_fail_command;
    }

    if (ha->mqenable) {
        uint32_t tag;
        uint16_t hwq;
        struct qla_qpair *qpair = NULL;

        tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
        hwq = blk_mq_unique_tag_to_hwq(tag);
        qpair = ha->queue_pair_map[hwq];

        if (qpair)
            return qla2xxx_mqueuecommand(host, cmd, qpair);
    }

    if (ha->flags.eeh_busy) {
        if (ha->flags.pci_channel_io_perm_failure) {
            ql_dbg(ql_dbg_aer, vha, 0x9010,
                "PCI Channel IO permanent failure, exiting cmd=%p.\n",
                cmd);
            cmd->result = DID_NO_CONNECT << 16;
        } else {
            ql_dbg(ql_dbg_aer, vha, 0x9011,
                "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
            cmd->result = DID_REQUEUE << 16;
        }
        goto qc24_fail_command;
    }

    rval = fc_remote_port_chkready(rport);
    if (rval) {
        cmd->result = rval;
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
            "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
            cmd, rval);
        goto qc24_fail_command;
    }

    if (!vha->flags.difdix_supported &&
        scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
        ql_dbg(ql_dbg_io, vha, 0x3004,
            "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
            cmd);
        cmd->result = DID_NO_CONNECT << 16;
        goto qc24_fail_command;
    }

    if (!fcport || fcport->deleted) {
        cmd->result = DID_IMM_RETRY << 16;
        goto qc24_fail_command;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
        if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
            ql_dbg(ql_dbg_io, vha, 0x3005,
                "Returning DNC, fcport_state=%d loop_state=%d.\n",
                atomic_read(&fcport->state),
                atomic_read(&base_vha->loop_state));
            cmd->result = DID_NO_CONNECT << 16;
            goto qc24_fail_command;
        }
        goto qc24_target_busy;
    }

    /*
     * Return target busy if we've received a non-zero retry_delay_timer
     * in a FCP_RSP.
     */
    if (fcport->retry_delay_timestamp == 0) {
        /* retry delay not set */
    } else if (time_after(jiffies, fcport->retry_delay_timestamp))
        fcport->retry_delay_timestamp = 0;
    else
        goto qc24_target_busy;

    sp = scsi_cmd_priv(cmd);
    /* ref: INIT */
    qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

    sp->u.scmd.cmd = cmd;
    sp->type = SRB_SCSI_CMD;
    sp->free = qla2x00_sp_free_dma;
    sp->done = qla2x00_sp_compl;

    rval = ha->isp_ops->start_scsi(sp);
    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
            "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
        goto qc24_host_busy_free_sp;
    }

    return 0;

qc24_host_busy_free_sp:
    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);

qc24_target_busy:
    return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
    scsi_done(cmd);

    return 0;
}
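
/*
 * Example: a sketch of the blk-mq tag decoding used in the MQ path of
 * qla2xxx_queuecommand() above. A unique tag packs the hardware-queue
 * index into its upper 16 bits and the per-queue tag into the lower
 * 16 bits, which is what the blk_mq_unique_tag_to_*() helpers extract.
 */
#if 0 /* illustrative sketch only -- not compiled */
static void example_tag_decode(struct scsi_cmnd *cmd)
{
    u32 tag  = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
    u16 hwq  = blk_mq_unique_tag_to_hwq(tag); /* tag >> 16 */
    u16 qtag = blk_mq_unique_tag_to_tag(tag); /* tag & 0xffff */

    pr_info("cmd %p -> hwq %u, tag %u\n", cmd, hwq, qtag);
}
#endif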
/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
    scsi_qla_host_t *vha = shost_priv(host);
    fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
    struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    srb_t *sp;
    int rval;

    rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
    if (rval) {
        cmd->result = rval;
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
            "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
            cmd, rval);
        goto qc24_fail_command;
    }

    if (!qpair->online) {
        ql_dbg(ql_dbg_io, vha, 0x3077,
            "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
        cmd->result = DID_NO_CONNECT << 16;
        goto qc24_fail_command;
    }

    if (!fcport || fcport->deleted) {
        cmd->result = DID_IMM_RETRY << 16;
        goto qc24_fail_command;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
        if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
            ql_dbg(ql_dbg_io, vha, 0x3077,
                "Returning DNC, fcport_state=%d loop_state=%d.\n",
                atomic_read(&fcport->state),
                atomic_read(&base_vha->loop_state));
            cmd->result = DID_NO_CONNECT << 16;
            goto qc24_fail_command;
        }
        goto qc24_target_busy;
    }

    /*
     * Return target busy if we've received a non-zero retry_delay_timer
     * in a FCP_RSP.
     */
    if (fcport->retry_delay_timestamp == 0) {
        /* retry delay not set */
    } else if (time_after(jiffies, fcport->retry_delay_timestamp))
        fcport->retry_delay_timestamp = 0;
    else
        goto qc24_target_busy;

    sp = scsi_cmd_priv(cmd);
    /* ref: INIT */
    qla2xxx_init_sp(sp, vha, qpair, fcport);

    sp->u.scmd.cmd = cmd;
    sp->type = SRB_SCSI_CMD;
    sp->free = qla2xxx_qpair_sp_free_dma;
    sp->done = qla2xxx_qpair_sp_compl;

    rval = ha->isp_ops->start_scsi_mq(sp);
    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
            "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
        goto qc24_host_busy_free_sp;
    }

    return 0;

qc24_host_busy_free_sp:
    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);

qc24_target_busy:
    return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
    scsi_done(cmd);

    return 0;
}
/*
 * qla2x00_wait_for_hba_online
 *    Wait until the HBA is online, i.e. until at most
 *    MAX_RETRIES_OF_ISP_ABORT retries have been performed or the HBA
 *    has been disabled (marked offline).
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    May sleep; release any spinlock held before calling this routine.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
    int return_status;
    unsigned long wait_online;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
    while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
        test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
        ha->dpc_active) && time_before(jiffies, wait_online)) {
        msleep(1000);
    }
    if (base_vha->flags.online)
        return_status = QLA_SUCCESS;
    else
        return_status = QLA_FUNCTION_FAILED;

    return return_status;
}
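
/*
 * The wait helpers in this file share one polling idiom: compute a
 * jiffies deadline, then sleep in one-second slices while the busy
 * condition holds and time_before(jiffies, deadline) is true. A
 * minimal sketch of the pattern (the predicate is hypothetical):
 */
#if 0 /* illustrative sketch only -- not compiled */
static int example_wait_until_idle(bool (*busy)(void))
{
    unsigned long deadline = jiffies + (MAX_LOOP_TIMEOUT * HZ);

    while (busy() && time_before(jiffies, deadline))
        msleep(1000);

    return busy() ? QLA_FUNCTION_FAILED : QLA_SUCCESS;
}
#endif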
static inline int test_fcport_count(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    int res;
    /* Return 0 = sleep, x = wake */

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    ql_dbg(ql_dbg_init, vha, 0x00ec,
        "tgt %p, fcport_count=%d\n",
        vha, vha->fcport_count);
    res = (vha->fcport_count == 0);
    if (res) {
        struct fc_port *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
            if (fcport->deleted != QLA_SESS_DELETED) {
                /*
                 * Sessions may not be fully logged in yet
                 * (i.e. fcport_count == 0) while session
                 * deletion threads are still in flight.
                 */
                res = 0;
                break;
            }
        }
    }
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

    return res;
}
/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
 * It depends on the UNLOADING flag to stop device discovery.
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
    u8 i;

    qla2x00_mark_all_devices_lost(vha);

    for (i = 0; i < 10; i++) {
        if (wait_event_timeout(vha->fcport_waitQ,
            test_fcport_count(vha), HZ) > 0)
            break;
    }

    flush_workqueue(vha->hw->wq);
}
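
/*
 * Note on the wait/wake pairing above: test_fcport_count() is the
 * wait_event_timeout() predicate, re-evaluated whenever something
 * wakes vha->fcport_waitQ, so the loop gives session deletion up to
 * roughly ten seconds (ten iterations of a one-second timeout) before
 * flushing the workqueue.
 */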
/*
 * qla2x00_wait_for_hba_ready
 *    Wait until the HBA is ready before doing a driver unload.
 *
 * Input:
 *    ha - pointer to host adapter structure
 *
 * Note:
 *    May sleep; release any spinlock held before calling this routine.
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    while ((qla2x00_reset_active(vha) || ha->dpc_active ||
        ha->flags.mbox_busy) ||
        test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
        test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
        if (test_bit(UNLOADING, &base_vha->dpc_flags))
            break;
        msleep(1000);
    }
}
int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
    int return_status;
    unsigned long wait_reset;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
    while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
        test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
        ha->dpc_active) && time_before(jiffies, wait_reset)) {
        msleep(1000);

        if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
            ha->flags.chip_reset_done)
            break;
    }
    if (ha->flags.chip_reset_done)
        return_status = QLA_SUCCESS;
    else
        return_status = QLA_FUNCTION_FAILED;

    return return_status;
}
/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only returns FAILED if the command was not returned by the firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
    DECLARE_COMPLETION_ONSTACK(comp);
    srb_t *sp;
    int ret;
    unsigned int id;
    uint64_t lun;
    int rval;
    struct qla_hw_data *ha = vha->hw;
    uint32_t ratov_j;
    struct qla_qpair *qpair;
    unsigned long flags;
    int fast_fail_status = SUCCESS;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x8042,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    /* Save any FAST_IO_FAIL value to return later if abort succeeds */
    ret = fc_block_scsi_eh(cmd);
    if (ret != 0)
        fast_fail_status = ret;

    sp = scsi_cmd_priv(cmd);
    qpair = sp->qpair;

    vha->cmd_timeout_cnt++;

    if ((sp->fcport && sp->fcport->deleted) || !qpair)
        return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    sp->comp = &comp;
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    id = cmd->device->id;
    lun = cmd->device->lun;

    ql_dbg(ql_dbg_taskm, vha, 0x8002,
        "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
        vha->host_no, id, lun, sp, cmd, sp->handle);

    /*
     * Abort will release the original command/sp from the firmware.
     * Let the original command call scsi_done; in return, it will wake
     * up this sleeping thread.
     */
    rval = ha->isp_ops->abort_command(sp);

    ql_dbg(ql_dbg_taskm, vha, 0x8003,
        "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

    /* Wait for the command completion. */
    ratov_j = ha->r_a_tov/10 * 4 * 1000;
    ratov_j = msecs_to_jiffies(ratov_j);

    switch (rval) {
    case QLA_SUCCESS:
        if (!wait_for_completion_timeout(&comp, ratov_j)) {
            ql_dbg(ql_dbg_taskm, vha, 0xffff,
                "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
                __func__, ha->r_a_tov/10);
            ret = FAILED;
        } else {
            ret = fast_fail_status;
        }
        break;
    default:
        ret = FAILED;
        break;
    }

    sp->comp = NULL;

    ql_log(ql_log_info, vha, 0x801c,
        "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
        vha->host_no, id, lun, ret);

    return ret;
}
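
/*
 * Worked example of the abort wait above: r_a_tov is kept in tenths
 * of a second, so with, say, ha->r_a_tov = 100 (R_A_TOV = 10 s) the
 * computation 100/10 * 4 * 1000 yields 40000 ms -- the handler waits
 * up to 4 * R_A_TOV = 40 seconds for the firmware to return the
 * aborted command before giving up.
 */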
#define ABORT_POLLING_PERIOD    1000
#define ABORT_WAIT_ITER         ((2 * 1000) / (ABORT_POLLING_PERIOD))

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
    uint64_t l, enum nexus_wait_type type)
{
    int cnt, match, status;
    unsigned long flags;
    scsi_qla_host_t *vha = qpair->vha;
    struct req_que *req = qpair->req;
    srb_t *sp;
    struct scsi_cmnd *cmd;
    unsigned long wait_iter = ABORT_WAIT_ITER;
    bool found;
    struct qla_hw_data *ha = vha->hw;

    status = QLA_SUCCESS;

    while (wait_iter--) {
        found = false;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
            sp = req->outstanding_cmds[cnt];
            if (!sp)
                continue;
            if (sp->type != SRB_SCSI_CMD)
                continue;
            if (vha->vp_idx != sp->vha->vp_idx)
                continue;
            match = 0;
            cmd = GET_CMD_SP(sp);
            switch (type) {
            case WAIT_HOST:
                match = 1;
                break;
            case WAIT_TARGET:
                if (sp->fcport)
                    match = sp->fcport->d_id.b24 == t;
                else
                    match = 0;
                break;
            case WAIT_LUN:
                if (sp->fcport)
                    match = (sp->fcport->d_id.b24 == t &&
                        cmd->device->lun == l);
                else
                    match = 0;
                break;
            }
            if (!match)
                continue;

            spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

            if (unlikely(pci_channel_offline(ha->pdev)) ||
                ha->flags.eeh_busy) {
                ql_dbg(ql_dbg_taskm, vha, 0x8005,
                    "Return:eh_wait.\n");
                return status;
            }

            /*
             * SRB_SCSI_CMD is still in the outstanding_cmds array,
             * which means scsi_done has not been called yet. Wait
             * for it to be cleared from outstanding_cmds.
             */
            msleep(ABORT_POLLING_PERIOD);
            spin_lock_irqsave(qpair->qp_lock_ptr, flags);
            found = true;
        }
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        if (!found)
            break;
    }

    if (wait_iter == -1)
        status = QLA_FUNCTION_FAILED;

    return status;
}
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
    uint64_t l, enum nexus_wait_type type)
{
    struct qla_qpair *qpair;
    struct qla_hw_data *ha = vha->hw;
    int i, status = QLA_SUCCESS;

    status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
        type);
    for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
        qpair = ha->queue_pair_map[i];
        if (!qpair)
            continue;
        status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
            type);
    }
    return status;
}
static char *reset_errors[] = {
    "HBA not online",
    "HBA not ready",
    "Task management failed",
    "Waiting for command completions",
};

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
    struct scsi_device *sdev = cmd->device;
    scsi_qla_host_t *vha = shost_priv(sdev->host);
    struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
    fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
    struct qla_hw_data *ha = vha->hw;
    int err;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x803e,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    if (!fcport)
        return FAILED;

    err = fc_block_rport(rport);
    if (err != 0)
        return err;

    if (fcport->deleted)
        return FAILED;

    ql_log(ql_log_info, vha, 0x8009,
        "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
        sdev->id, sdev->lun, cmd);

    err = 0;
    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800a,
            "Wait for hba online failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 2;
    if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
        != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800c,
            "do_reset failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 3;
    if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
        cmd->device->lun, WAIT_LUN) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800d,
            "wait for pending cmds failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }

    ql_log(ql_log_info, vha, 0x800e,
        "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
        vha->host_no, sdev->id, sdev->lun, cmd);

    return SUCCESS;

eh_reset_failed:
    ql_log(ql_log_info, vha, 0x800f,
        "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
        reset_errors[err], vha->host_no, sdev->id, sdev->lun,
        cmd);
    vha->reset_cmd_err_cnt++;
    return FAILED;
}
static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
    struct scsi_device *sdev = cmd->device;
    struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
    scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
    struct qla_hw_data *ha = vha->hw;
    fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
    int err;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x803f,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    if (!fcport)
        return FAILED;

    err = fc_block_rport(rport);
    if (err != 0)
        return err;

    if (fcport->deleted)
        return FAILED;

    ql_log(ql_log_info, vha, 0x8009,
        "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
        sdev->id, cmd);

    err = 0;
    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800a,
            "Wait for hba online failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 2;
    if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800c,
            "target_reset failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 3;
    if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
        WAIT_TARGET) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800d,
            "wait for pending cmds failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }

    ql_log(ql_log_info, vha, 0x800e,
        "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
        vha->host_no, sdev->id, cmd);

    return SUCCESS;

eh_reset_failed:
    ql_log(ql_log_info, vha, 0x800f,
        "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
        reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
        cmd);
    vha->reset_cmd_err_cnt++;
    return FAILED;
}
/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus reset.
*
* Returns:
*    SUCCESS or FAILED (defined as macros in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
    int ret = FAILED;
    unsigned int id;
    uint64_t lun;
    struct qla_hw_data *ha = vha->hw;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x8040,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    id = cmd->device->id;
    lun = cmd->device->lun;

    if (qla2x00_chip_is_down(vha))
        return ret;

    ql_log(ql_log_info, vha, 0x8012,
        "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0x8013,
            "Wait for hba online failed board disabled.\n");
        goto eh_bus_reset_done;
    }

    if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
        ret = SUCCESS;

    if (ret == FAILED)
        goto eh_bus_reset_done;

    /* Flush outstanding commands. */
    if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
        QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x8014,
            "Wait for pending commands failed.\n");
        ret = FAILED;
    }

eh_bus_reset_done:
    ql_log(ql_log_warn, vha, 0x802b,
        "BUS RESET %s nexus=%ld:%d:%llu.\n",
        (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

    return ret;
}
/**************************************************************************
* qla2xxx_eh_host_reset
*
* Description:
*    The reset function will reset the adapter.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          adapter reset.
*
* Returns:
*    Either SUCCESS or FAILED.
**************************************************************************/
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
    struct qla_hw_data *ha = vha->hw;
    int ret = FAILED;
    unsigned int id;
    uint64_t lun;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x8041,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return SUCCESS;
    }

    id = cmd->device->id;
    lun = cmd->device->lun;

    ql_log(ql_log_info, vha, 0x8018,
        "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

    /*
     * No point in issuing another reset if one is active. Also do not
     * attempt a reset if we are updating flash.
     */
    if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
        goto eh_host_reset_lock;

    if (vha != base_vha) {
        if (qla2x00_vp_abort_isp(vha))
            goto eh_host_reset_lock;
    } else {
        if (IS_P3P_TYPE(vha->hw)) {
            if (!qla82xx_fcoe_ctx_reset(vha)) {
                /* Ctx reset success */
                ret = SUCCESS;
                goto eh_host_reset_lock;
            }
            /* Fall through if the ctx reset failed. */
        }
        if (ha->wq)
            flush_workqueue(ha->wq);

        set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
        if (ha->isp_ops->abort_isp(base_vha)) {
            clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
            /* Failed; schedule the DPC to retry. */
            set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);

            if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x802a,
                    "wait for hba online failed.\n");
                goto eh_host_reset_lock;
            }
        }
        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
    }

    /* Waiting for the command to be returned to the OS. */
    if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
        QLA_SUCCESS)
        ret = SUCCESS;

eh_host_reset_lock:
    ql_log(ql_log_info, vha, 0x8017,
        "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
        (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

    return ret;
}
/*
 * qla2x00_loop_reset
 *    Issue a loop reset.
 *
 * Input:
 *    ha = adapter block pointer.
 *
 * Returns:
 *    0 = success
 */
int
qla2x00_loop_reset(scsi_qla_host_t *vha)
{
    int ret;
    struct qla_hw_data *ha = vha->hw;

    if (IS_QLAFX00(ha))
        return QLA_SUCCESS;

    if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        qla2x00_mark_all_devices_lost(vha);
        ret = qla2x00_full_login_lip(vha);
        if (ret != QLA_SUCCESS) {
            ql_dbg(ql_dbg_taskm, vha, 0x802d,
                "full_login_lip=%d.\n", ret);
        }
    }

    if (ha->flags.enable_lip_reset) {
        ret = qla2x00_lip_reset(vha);
        if (ret != QLA_SUCCESS)
            ql_dbg(ql_dbg_taskm, vha, 0x802e,
                "lip_reset failed (%d).\n", ret);
    }

    /* Issue marker command only when we are going to start the I/O */
    vha->marker_needed = 1;

    return QLA_SUCCESS;
}
/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
    unsigned long *flags)
    __releases(qp->qp_lock_ptr)
    __acquires(qp->qp_lock_ptr)
{
    DECLARE_COMPLETION_ONSTACK(comp);
    scsi_qla_host_t *vha = qp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    int rval;
    bool ret_cmd;
    uint32_t ratov_j;

    lockdep_assert_held(qp->qp_lock_ptr);

    if (qla2x00_chip_is_down(vha)) {
        sp->done(sp, res);
        return;
    }

    if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
        (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
        !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
        !qla2x00_isp_reg_stat(ha))) {
        if (sp->comp) {
            sp->done(sp, res);
            return;
        }

        sp->comp = &comp;
        spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);

        rval = ha->isp_ops->abort_command(sp);
        /* Wait for command completion. */
        ret_cmd = false;
        ratov_j = ha->r_a_tov/10 * 4 * 1000;
        ratov_j = msecs_to_jiffies(ratov_j);
        switch (rval) {
        case QLA_SUCCESS:
            if (!wait_for_completion_timeout(&comp, ratov_j)) {
                ql_dbg(ql_dbg_taskm, vha, 0xffff,
                    "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
                    __func__, ha->r_a_tov/10);
                ret_cmd = true;
            }
            /* else the FW returned the SP to the driver */
            break;
        default:
            ret_cmd = true;
            break;
        }

        spin_lock_irqsave(qp->qp_lock_ptr, *flags);
        switch (sp->type) {
        case SRB_SCSI_CMD:
            if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
                sp->done(sp, res);
            break;
        default:
            if (ret_cmd)
                sp->done(sp, res);
            break;
        }
    } else {
        sp->done(sp, res);
    }
}
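
/*
 * Note the lock discipline above: the qpair lock is dropped around the
 * blocking abort_command()/wait_for_completion_timeout() pair and then
 * re-acquired, which is why qla2x00_abort_srb() carries the
 * __releases()/__acquires() annotations for sparse and takes the saved
 * irq flags by pointer.
 */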
/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
    int cnt;
    unsigned long flags;
    srb_t *sp;
    scsi_qla_host_t *vha = qp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct qla_tgt_cmd *cmd;

    if (!ha->req_q_map)
        return;
    spin_lock_irqsave(qp->qp_lock_ptr, flags);
    req = qp->req;
    for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
        sp = req->outstanding_cmds[cnt];
        if (sp) {
            /*
             * Perform a lockless completion during driver unload.
             */
            if (qla2x00_chip_is_down(vha)) {
                req->outstanding_cmds[cnt] = NULL;
                spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
                sp->done(sp, res);
                spin_lock_irqsave(qp->qp_lock_ptr, flags);
                continue;
            }

            switch (sp->cmd_type) {
            case TYPE_SRB:
                qla2x00_abort_srb(qp, sp, res, &flags);
                break;
            case TYPE_TGT_CMD:
                if (!vha->hw->tgt.tgt_ops || !tgt ||
                    qla_ini_mode_enabled(vha)) {
                    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
                        "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
                        vha->dpc_flags);
                    continue;
                }
                cmd = (struct qla_tgt_cmd *)sp;
                cmd->aborted = 1;
                break;
            case TYPE_TGT_TMCMD:
                /* Skip task management functions. */
                break;
            default:
                break;
            }
            req->outstanding_cmds[cnt] = NULL;
        }
    }
    spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
}
/*
 * The caller must ensure that no completion interrupts will happen
 * while this function is in progress.
 */
void
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
    int que;
    struct qla_hw_data *ha = vha->hw;

    /* Continue only if initialization complete. */
    if (!ha->base_qpair)
        return;
    __qla2x00_abort_all_cmds(ha->base_qpair, res);

    if (!ha->queue_pair_map)
        return;
    for (que = 0; que < ha->max_qpairs; que++) {
        if (!ha->queue_pair_map[que])
            continue;
        __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
    }
}
static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
    struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

    if (!rport || fc_remote_port_chkready(rport))
        return -ENXIO;

    sdev->hostdata = *(fc_port_t **)rport->dd_data;

    return 0;
}

static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
    scsi_qla_host_t *vha = shost_priv(sdev->host);
    struct req_que *req = vha->req;

    if (IS_T10_PI_CAPABLE(vha->hw))
        blk_queue_update_dma_alignment(sdev->request_queue, 0x7);

    scsi_change_queue_depth(sdev, req->max_q_depth);
    return 0;
}

static void
qla2xxx_slave_destroy(struct scsi_device *sdev)
{
    sdev->hostdata = NULL;
}
/**
 * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
 * @ha: HA context
 *
 * On exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
 */
static void
qla2x00_config_dma_addressing(struct qla_hw_data *ha)
{
    /* Assume a 32bit DMA mask. */
    ha->flags.enable_64bit_addressing = 0;

    if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
        /* Any upper-dword bits set? */
        if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
            !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
            /* Ok, a 64bit DMA mask is applicable. */
            ha->flags.enable_64bit_addressing = 1;
            ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
            ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
            return;
        }
    }

    dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
    dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}
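
/*
 * Example: the same try-64-bit-then-fall-back-to-32-bit policy as
 * above, sketched for a generic PCI driver using the combined helper.
 * This is a minimal illustration, not the driver's actual code path.
 */
#if 0 /* illustrative sketch only -- not compiled */
static int example_set_dma_mask(struct pci_dev *pdev)
{
    if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
        return 0; /* 64-bit streaming + coherent DMA is usable */

    /* Fall back to a 32-bit mask. */
    return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
#endif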
static void
qla2x00_enable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 1;
    /* enable risc and host interrupts */
    wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
    rd_reg_word(&reg->ictrl);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla2x00_disable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 0;
    /* disable risc and host interrupts */
    wrt_reg_word(&reg->ictrl, 0);
    rd_reg_word(&reg->ictrl);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_enable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 1;
    wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
    rd_reg_dword(&reg->ictrl);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qla24xx_disable_intrs(struct qla_hw_data *ha)
{
    unsigned long flags = 0;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    if (IS_NOPOLLING_TYPE(ha))
        return;
    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->interrupts_on = 0;
    wrt_reg_dword(&reg->ictrl, 0);
    rd_reg_dword(&reg->ictrl);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static int
qla2x00_iospace_config(struct qla_hw_data *ha)
{
    resource_size_t pio;
    uint16_t msix;

    if (pci_request_selected_regions(ha->pdev, ha->bars,
        QLA2XXX_DRIVER_NAME)) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }
    if (!(ha->bars & 1))
        goto skip_pio;

    /* We only need PIO for Flash operations on ISP2312 v2 chips. */
    pio = pci_resource_start(ha->pdev, 0);
    if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
        if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
            ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
                "Invalid pci I/O region size (%s).\n",
                pci_name(ha->pdev));
            pio = 0;
        }
    } else {
        ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
            "Region #0 not a PIO resource (%s).\n",
            pci_name(ha->pdev));
        pio = 0;
    }
    ha->pio_address = pio;
    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
        "PIO address=%llu.\n",
        (unsigned long long)ha->pio_address);

skip_pio:
    /* Use MMIO operations for all accesses. */
    if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
            "Region #1 not an MMIO resource (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }
    if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
            "Invalid PCI mem region size (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
    if (!ha->iobase) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
            "Cannot remap MMIO (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    /* Determine queue resources */
    ha->max_req_queues = ha->max_rsp_queues = 1;
    ha->msix_count = QLA_BASE_VECTORS;

    /* Check if FW supports MQ or not */
    if (!(ha->fw_attributes & BIT_6))
        goto mqiobase_exit;

    if (!ql2xmqsupport || !ql2xnvmeenable ||
        (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
        goto mqiobase_exit;

    ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
        pci_resource_len(ha->pdev, 3));
    if (ha->mqiobase) {
        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
            "MQIO Base=%p.\n", ha->mqiobase);
        /* Read MSIX vector size of the board */
        pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
        ha->msix_count = msix + 1;
        /* Max queues are bounded by the available MSI-X vectors. */
        /* MB interrupt uses 1 vector */
        ha->max_req_queues = ha->msix_count - 1;
        ha->max_rsp_queues = ha->max_req_queues;
        /* Queue pair count is the max value minus the base queue pair. */
        ha->max_qpairs = ha->max_rsp_queues - 1;
        ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
            "Max no of queue pairs: %d.\n", ha->max_qpairs);

        ql_log_pci(ql_log_info, ha->pdev, 0x001a,
            "MSI-X vector count: %d.\n", ha->msix_count);
    } else
        ql_log_pci(ql_log_info, ha->pdev, 0x001b,
            "BAR 3 not enabled.\n");

mqiobase_exit:
    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
        "MSIX Count: %d.\n", ha->msix_count);
    return 0;

iospace_error_exit:
    return -ENOMEM;
}
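
/*
 * Worked example of the queue accounting above: if the MSI-X control
 * word reads, say, msix = 7, the driver assumes msix_count = 8
 * vectors, reserves one vector for the mailbox interrupt
 * (max_req_queues = max_rsp_queues = 7), and reserves one queue pair
 * for the base queue, leaving max_qpairs = 6.
 */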
static int
qla83xx_iospace_config(struct qla_hw_data *ha)
{
    uint16_t msix;

    if (pci_request_selected_regions(ha->pdev, ha->bars,
        QLA2XXX_DRIVER_NAME)) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
            "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    /* Use MMIO operations for all accesses. */
    if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
            "Invalid pci I/O region size (%s).\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }
    if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
        ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
            "Invalid PCI mem region size (%s), aborting\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
    if (!ha->iobase) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
            "Cannot remap MMIO (%s), aborting.\n",
            pci_name(ha->pdev));
        goto iospace_error_exit;
    }

    /* 64bit PCI BAR - BAR2 will correspond to region 4 */
    /*
     * 83XX/26XX always use MQ-type access for the queues:
     * mbar 2, a.k.a. region 4.
     */
    ha->max_req_queues = ha->max_rsp_queues = 1;
    ha->msix_count = QLA_BASE_VECTORS;
    ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
        pci_resource_len(ha->pdev, 4));
    if (!ha->mqiobase) {
        ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
            "BAR2/region4 not enabled\n");
        goto mqiobase_exit;
    }

    ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
        pci_resource_len(ha->pdev, 2));
    if (ha->msixbase) {
        /* Read MSIX vector size of the board */
        pci_read_config_word(ha->pdev,
            QLA_83XX_PCI_MSIX_CONTROL, &msix);
        ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
        /*
         * By default, the driver uses at least two MSI-X vectors
         * (default & rspq).
         */
        if (ql2xmqsupport || ql2xnvmeenable) {
            /* MB interrupt uses 1 vector */
            ha->max_req_queues = ha->msix_count - 1;

            /* ATIOQ needs 1 vector. That's 1 less QPair */
            if (QLA_TGT_MODE_ENABLED())
                ha->max_req_queues--;

            ha->max_rsp_queues = ha->max_req_queues;

            /* Queue pair count is the max value minus the base queue pair. */
            ha->max_qpairs = ha->max_req_queues - 1;
            ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
                "Max no of queue pairs: %d.\n", ha->max_qpairs);
        }
        ql_log_pci(ql_log_info, ha->pdev, 0x011c,
            "MSI-X vector count: %d.\n", ha->msix_count);
    } else
        ql_log_pci(ql_log_info, ha->pdev, 0x011e,
            "BAR 1 not enabled.\n");

mqiobase_exit:
    ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
        "MSIX Count: %d.\n", ha->msix_count);
    return 0;

iospace_error_exit:
    return -ENOMEM;
}
static struct isp_operations qla2100_isp_ops = {
    .pci_config = qla2100_pci_config,
    .reset_chip = qla2x00_reset_chip,
    .chip_diag = qla2x00_chip_diag,
    .config_rings = qla2x00_config_rings,
    .reset_adapter = qla2x00_reset_adapter,
    .nvram_config = qla2x00_nvram_config,
    .update_fw_options = qla2x00_update_fw_options,
    .load_risc = qla2x00_load_risc,
    .pci_info_str = qla2x00_pci_info_str,
    .fw_version_str = qla2x00_fw_version_str,
    .intr_handler = qla2100_intr_handler,
    .enable_intrs = qla2x00_enable_intrs,
    .disable_intrs = qla2x00_disable_intrs,
    .abort_command = qla2x00_abort_command,
    .target_reset = qla2x00_abort_target,
    .lun_reset = qla2x00_lun_reset,
    .fabric_login = qla2x00_login_fabric,
    .fabric_logout = qla2x00_fabric_logout,
    .calc_req_entries = qla2x00_calc_iocbs_32,
    .build_iocbs = qla2x00_build_scsi_iocbs_32,
    .prep_ms_iocb = qla2x00_prep_ms_iocb,
    .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
    .read_nvram = qla2x00_read_nvram_data,
    .write_nvram = qla2x00_write_nvram_data,
    .fw_dump = qla2100_fw_dump,
    .beacon_on = NULL,
    .beacon_off = NULL,
    .beacon_blink = NULL,
    .read_optrom = qla2x00_read_optrom_data,
    .write_optrom = qla2x00_write_optrom_data,
    .get_flash_version = qla2x00_get_flash_version,
    .start_scsi = qla2x00_start_scsi,
    .start_scsi_mq = NULL,
    .abort_isp = qla2x00_abort_isp,
    .iospace_config = qla2x00_iospace_config,
    .initialize_adapter = qla2x00_initialize_adapter,
};
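
/*
 * The isp_operations tables here implement per-chip-family dispatch:
 * generic code calls through ha->isp_ops, so the same call site
 * resolves to, e.g., qla2100_intr_handler() on an ISP2100 and
 * qla24xx_intr_handler() on an ISP24xx. A minimal sketch of the
 * pattern with a hypothetical ops structure:
 */
#if 0 /* illustrative sketch only -- not compiled */
struct example_ops {
    void (*enable_intrs)(struct qla_hw_data *);
};

static void example_enable(struct qla_hw_data *ha,
    const struct example_ops *ops)
{
    ops->enable_intrs(ha); /* resolves to the chip-specific handler */
}
#endif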
  1964. static struct isp_operations qla2300_isp_ops = {
  1965. .pci_config = qla2300_pci_config,
  1966. .reset_chip = qla2x00_reset_chip,
  1967. .chip_diag = qla2x00_chip_diag,
  1968. .config_rings = qla2x00_config_rings,
  1969. .reset_adapter = qla2x00_reset_adapter,
  1970. .nvram_config = qla2x00_nvram_config,
  1971. .update_fw_options = qla2x00_update_fw_options,
  1972. .load_risc = qla2x00_load_risc,
  1973. .pci_info_str = qla2x00_pci_info_str,
  1974. .fw_version_str = qla2x00_fw_version_str,
  1975. .intr_handler = qla2300_intr_handler,
  1976. .enable_intrs = qla2x00_enable_intrs,
  1977. .disable_intrs = qla2x00_disable_intrs,
  1978. .abort_command = qla2x00_abort_command,
  1979. .target_reset = qla2x00_abort_target,
  1980. .lun_reset = qla2x00_lun_reset,
  1981. .fabric_login = qla2x00_login_fabric,
  1982. .fabric_logout = qla2x00_fabric_logout,
  1983. .calc_req_entries = qla2x00_calc_iocbs_32,
  1984. .build_iocbs = qla2x00_build_scsi_iocbs_32,
  1985. .prep_ms_iocb = qla2x00_prep_ms_iocb,
  1986. .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
  1987. .read_nvram = qla2x00_read_nvram_data,
  1988. .write_nvram = qla2x00_write_nvram_data,
  1989. .fw_dump = qla2300_fw_dump,
  1990. .beacon_on = qla2x00_beacon_on,
  1991. .beacon_off = qla2x00_beacon_off,
  1992. .beacon_blink = qla2x00_beacon_blink,
  1993. .read_optrom = qla2x00_read_optrom_data,
  1994. .write_optrom = qla2x00_write_optrom_data,
  1995. .get_flash_version = qla2x00_get_flash_version,
  1996. .start_scsi = qla2x00_start_scsi,
  1997. .start_scsi_mq = NULL,
  1998. .abort_isp = qla2x00_abort_isp,
  1999. .iospace_config = qla2x00_iospace_config,
  2000. .initialize_adapter = qla2x00_initialize_adapter,
  2001. };
  2002. static struct isp_operations qla24xx_isp_ops = {
  2003. .pci_config = qla24xx_pci_config,
  2004. .reset_chip = qla24xx_reset_chip,
  2005. .chip_diag = qla24xx_chip_diag,
  2006. .config_rings = qla24xx_config_rings,
  2007. .reset_adapter = qla24xx_reset_adapter,
  2008. .nvram_config = qla24xx_nvram_config,
  2009. .update_fw_options = qla24xx_update_fw_options,
  2010. .load_risc = qla24xx_load_risc,
  2011. .pci_info_str = qla24xx_pci_info_str,
  2012. .fw_version_str = qla24xx_fw_version_str,
  2013. .intr_handler = qla24xx_intr_handler,
  2014. .enable_intrs = qla24xx_enable_intrs,
  2015. .disable_intrs = qla24xx_disable_intrs,
  2016. .abort_command = qla24xx_abort_command,
  2017. .target_reset = qla24xx_abort_target,
  2018. .lun_reset = qla24xx_lun_reset,
  2019. .fabric_login = qla24xx_login_fabric,
  2020. .fabric_logout = qla24xx_fabric_logout,
  2021. .calc_req_entries = NULL,
  2022. .build_iocbs = NULL,
  2023. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  2024. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  2025. .read_nvram = qla24xx_read_nvram_data,
  2026. .write_nvram = qla24xx_write_nvram_data,
  2027. .fw_dump = qla24xx_fw_dump,
  2028. .beacon_on = qla24xx_beacon_on,
  2029. .beacon_off = qla24xx_beacon_off,
  2030. .beacon_blink = qla24xx_beacon_blink,
  2031. .read_optrom = qla24xx_read_optrom_data,
  2032. .write_optrom = qla24xx_write_optrom_data,
  2033. .get_flash_version = qla24xx_get_flash_version,
  2034. .start_scsi = qla24xx_start_scsi,
  2035. .start_scsi_mq = NULL,
  2036. .abort_isp = qla2x00_abort_isp,
  2037. .iospace_config = qla2x00_iospace_config,
  2038. .initialize_adapter = qla2x00_initialize_adapter,
  2039. };
  2040. static struct isp_operations qla25xx_isp_ops = {
  2041. .pci_config = qla25xx_pci_config,
  2042. .reset_chip = qla24xx_reset_chip,
  2043. .chip_diag = qla24xx_chip_diag,
  2044. .config_rings = qla24xx_config_rings,
  2045. .reset_adapter = qla24xx_reset_adapter,
  2046. .nvram_config = qla24xx_nvram_config,
  2047. .update_fw_options = qla24xx_update_fw_options,
  2048. .load_risc = qla24xx_load_risc,
  2049. .pci_info_str = qla24xx_pci_info_str,
  2050. .fw_version_str = qla24xx_fw_version_str,
  2051. .intr_handler = qla24xx_intr_handler,
  2052. .enable_intrs = qla24xx_enable_intrs,
  2053. .disable_intrs = qla24xx_disable_intrs,
  2054. .abort_command = qla24xx_abort_command,
  2055. .target_reset = qla24xx_abort_target,
  2056. .lun_reset = qla24xx_lun_reset,
  2057. .fabric_login = qla24xx_login_fabric,
  2058. .fabric_logout = qla24xx_fabric_logout,
  2059. .calc_req_entries = NULL,
  2060. .build_iocbs = NULL,
  2061. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  2062. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  2063. .read_nvram = qla25xx_read_nvram_data,
  2064. .write_nvram = qla25xx_write_nvram_data,
  2065. .fw_dump = qla25xx_fw_dump,
  2066. .beacon_on = qla24xx_beacon_on,
  2067. .beacon_off = qla24xx_beacon_off,
  2068. .beacon_blink = qla24xx_beacon_blink,
  2069. .read_optrom = qla25xx_read_optrom_data,
  2070. .write_optrom = qla24xx_write_optrom_data,
  2071. .get_flash_version = qla24xx_get_flash_version,
  2072. .start_scsi = qla24xx_dif_start_scsi,
  2073. .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
  2074. .abort_isp = qla2x00_abort_isp,
  2075. .iospace_config = qla2x00_iospace_config,
  2076. .initialize_adapter = qla2x00_initialize_adapter,
  2077. };
  2078. static struct isp_operations qla81xx_isp_ops = {
  2079. .pci_config = qla25xx_pci_config,
  2080. .reset_chip = qla24xx_reset_chip,
  2081. .chip_diag = qla24xx_chip_diag,
  2082. .config_rings = qla24xx_config_rings,
  2083. .reset_adapter = qla24xx_reset_adapter,
  2084. .nvram_config = qla81xx_nvram_config,
  2085. .update_fw_options = qla24xx_update_fw_options,
  2086. .load_risc = qla81xx_load_risc,
  2087. .pci_info_str = qla24xx_pci_info_str,
  2088. .fw_version_str = qla24xx_fw_version_str,
  2089. .intr_handler = qla24xx_intr_handler,
  2090. .enable_intrs = qla24xx_enable_intrs,
  2091. .disable_intrs = qla24xx_disable_intrs,
  2092. .abort_command = qla24xx_abort_command,
  2093. .target_reset = qla24xx_abort_target,
  2094. .lun_reset = qla24xx_lun_reset,
  2095. .fabric_login = qla24xx_login_fabric,
  2096. .fabric_logout = qla24xx_fabric_logout,
  2097. .calc_req_entries = NULL,
  2098. .build_iocbs = NULL,
  2099. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  2100. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  2101. .read_nvram = NULL,
  2102. .write_nvram = NULL,
  2103. .fw_dump = qla81xx_fw_dump,
  2104. .beacon_on = qla24xx_beacon_on,
  2105. .beacon_off = qla24xx_beacon_off,
  2106. .beacon_blink = qla83xx_beacon_blink,
  2107. .read_optrom = qla25xx_read_optrom_data,
  2108. .write_optrom = qla24xx_write_optrom_data,
  2109. .get_flash_version = qla24xx_get_flash_version,
  2110. .start_scsi = qla24xx_dif_start_scsi,
  2111. .start_scsi_mq = qla2xxx_dif_start_scsi_mq,
  2112. .abort_isp = qla2x00_abort_isp,
  2113. .iospace_config = qla2x00_iospace_config,
  2114. .initialize_adapter = qla2x00_initialize_adapter,
  2115. };
  2116. static struct isp_operations qla82xx_isp_ops = {
  2117. .pci_config = qla82xx_pci_config,
  2118. .reset_chip = qla82xx_reset_chip,
  2119. .chip_diag = qla24xx_chip_diag,
  2120. .config_rings = qla82xx_config_rings,
  2121. .reset_adapter = qla24xx_reset_adapter,
  2122. .nvram_config = qla81xx_nvram_config,
  2123. .update_fw_options = qla24xx_update_fw_options,
  2124. .load_risc = qla82xx_load_risc,
  2125. .pci_info_str = qla24xx_pci_info_str,
  2126. .fw_version_str = qla24xx_fw_version_str,
  2127. .intr_handler = qla82xx_intr_handler,
  2128. .enable_intrs = qla82xx_enable_intrs,
  2129. .disable_intrs = qla82xx_disable_intrs,
  2130. .abort_command = qla24xx_abort_command,
  2131. .target_reset = qla24xx_abort_target,
  2132. .lun_reset = qla24xx_lun_reset,
  2133. .fabric_login = qla24xx_login_fabric,
  2134. .fabric_logout = qla24xx_fabric_logout,
  2135. .calc_req_entries = NULL,
  2136. .build_iocbs = NULL,
  2137. .prep_ms_iocb = qla24xx_prep_ms_iocb,
  2138. .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
  2139. .read_nvram = qla24xx_read_nvram_data,
  2140. .write_nvram = qla24xx_write_nvram_data,
  2141. .fw_dump = qla82xx_fw_dump,
  2142. .beacon_on = qla82xx_beacon_on,
  2143. .beacon_off = qla82xx_beacon_off,
  2144. .beacon_blink = NULL,
  2145. .read_optrom = qla82xx_read_optrom_data,
  2146. .write_optrom = qla82xx_write_optrom_data,
  2147. .get_flash_version = qla82xx_get_flash_version,
  2148. .start_scsi = qla82xx_start_scsi,
  2149. .start_scsi_mq = NULL,
  2150. .abort_isp = qla82xx_abort_isp,
  2151. .iospace_config = qla82xx_iospace_config,
  2152. .initialize_adapter = qla2x00_initialize_adapter,
  2153. };

static struct isp_operations qla8044_isp_ops = {
	.pci_config = qla82xx_pci_config,
	.reset_chip = qla82xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla82xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla82xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla8044_intr_handler,
	.enable_intrs = qla82xx_enable_intrs,
	.disable_intrs = qla82xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla8044_fw_dump,
	.beacon_on = qla82xx_beacon_on,
	.beacon_off = qla82xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla8044_read_optrom_data,
	.write_optrom = qla8044_write_optrom_data,
	.get_flash_version = qla82xx_get_flash_version,
	.start_scsi = qla82xx_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qla8044_abort_isp,
	.iospace_config = qla82xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qla83xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla83xx_fw_dump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};

static struct isp_operations qlafx00_isp_ops = {
	.pci_config = qlafx00_pci_config,
	.reset_chip = qlafx00_soft_reset,
	.chip_diag = qlafx00_chip_diag,
	.config_rings = qlafx00_config_rings,
	.reset_adapter = qlafx00_soft_reset,
	.nvram_config = NULL,
	.update_fw_options = NULL,
	.load_risc = NULL,
	.pci_info_str = qlafx00_pci_info_str,
	.fw_version_str = qlafx00_fw_version_str,
	.intr_handler = qlafx00_intr_handler,
	.enable_intrs = qlafx00_enable_intrs,
	.disable_intrs = qlafx00_disable_intrs,
	.abort_command = qla24xx_async_abort_command,
	.target_reset = qlafx00_abort_target,
	.lun_reset = qlafx00_lun_reset,
	.fabric_login = NULL,
	.fabric_logout = NULL,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = qla24xx_read_nvram_data,
	.write_nvram = qla24xx_write_nvram_data,
	.fw_dump = NULL,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = NULL,
	.read_optrom = qla24xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qlafx00_start_scsi,
	.start_scsi_mq = NULL,
	.abort_isp = qlafx00_abort_isp,
	.iospace_config = qlafx00_iospace_config,
	.initialize_adapter = qlafx00_initialize_adapter,
};

static struct isp_operations qla27xx_isp_ops = {
	.pci_config = qla25xx_pci_config,
	.reset_chip = qla24xx_reset_chip,
	.chip_diag = qla24xx_chip_diag,
	.config_rings = qla24xx_config_rings,
	.reset_adapter = qla24xx_reset_adapter,
	.nvram_config = qla81xx_nvram_config,
	.update_fw_options = qla24xx_update_fw_options,
	.load_risc = qla81xx_load_risc,
	.pci_info_str = qla24xx_pci_info_str,
	.fw_version_str = qla24xx_fw_version_str,
	.intr_handler = qla24xx_intr_handler,
	.enable_intrs = qla24xx_enable_intrs,
	.disable_intrs = qla24xx_disable_intrs,
	.abort_command = qla24xx_abort_command,
	.target_reset = qla24xx_abort_target,
	.lun_reset = qla24xx_lun_reset,
	.fabric_login = qla24xx_login_fabric,
	.fabric_logout = qla24xx_fabric_logout,
	.calc_req_entries = NULL,
	.build_iocbs = NULL,
	.prep_ms_iocb = qla24xx_prep_ms_iocb,
	.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
	.read_nvram = NULL,
	.write_nvram = NULL,
	.fw_dump = qla27xx_fwdump,
	.mpi_fw_dump = qla27xx_mpi_fwdump,
	.beacon_on = qla24xx_beacon_on,
	.beacon_off = qla24xx_beacon_off,
	.beacon_blink = qla83xx_beacon_blink,
	.read_optrom = qla25xx_read_optrom_data,
	.write_optrom = qla24xx_write_optrom_data,
	.get_flash_version = qla24xx_get_flash_version,
	.start_scsi = qla24xx_dif_start_scsi,
	.start_scsi_mq = qla2xxx_dif_start_scsi_mq,
	.abort_isp = qla2x00_abort_isp,
	.iospace_config = qla83xx_iospace_config,
	.initialize_adapter = qla2x00_initialize_adapter,
};
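
/*
 * Each ISP family above supplies its own struct isp_operations; the
 * core driver only ever calls through ha->isp_ops (for example
 * ha->isp_ops->pci_config(vha)), so common code never branches on
 * chip type directly.  qla2x00_set_isp_flags() below classifies the
 * adapter from its PCI device ID so probe can pick the right table.
 */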

static inline void
qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
	ha->device_type = DT_EXTENDED_IDS;
	switch (ha->pdev->device) {
	case PCI_DEVICE_ID_QLOGIC_ISP2100:
		ha->isp_type |= DT_ISP2100;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2200:
		ha->isp_type |= DT_ISP2200;
		ha->device_type &= ~DT_EXTENDED_IDS;
		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2300:
		ha->isp_type |= DT_ISP2300;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2312:
		ha->isp_type |= DT_ISP2312;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2322:
		ha->isp_type |= DT_ISP2322;
		ha->device_type |= DT_ZIO_SUPPORTED;
		if (ha->pdev->subsystem_vendor == 0x1028 &&
		    ha->pdev->subsystem_device == 0x0170)
			ha->device_type |= DT_OEM_001;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6312:
		ha->isp_type |= DT_ISP6312;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP6322:
		ha->isp_type |= DT_ISP6322;
		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2422:
		ha->isp_type |= DT_ISP2422;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2432:
		ha->isp_type |= DT_ISP2432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8432:
		ha->isp_type |= DT_ISP8432;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5422:
		ha->isp_type |= DT_ISP5422;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP5432:
		ha->isp_type |= DT_ISP5432;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2532:
		ha->isp_type |= DT_ISP2532;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8001:
		ha->isp_type |= DT_ISP8001;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8021:
		ha->isp_type |= DT_ISP8021;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8044:
		ha->isp_type |= DT_ISP8044;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		/* Initialize 82XX ISP flags */
		qla82xx_init_flags(ha);
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2031:
		ha->isp_type |= DT_ISP2031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP8031:
		ha->isp_type |= DT_ISP8031;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISPF001:
		ha->isp_type |= DT_ISPFX00;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2071:
		ha->isp_type |= DT_ISP2071;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2271:
		ha->isp_type |= DT_ISP2271;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2261:
		ha->isp_type |= DT_ISP2261;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2081:
	case PCI_DEVICE_ID_QLOGIC_ISP2089:
		ha->isp_type |= DT_ISP2081;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	case PCI_DEVICE_ID_QLOGIC_ISP2281:
	case PCI_DEVICE_ID_QLOGIC_ISP2289:
		ha->isp_type |= DT_ISP2281;
		ha->device_type |= DT_ZIO_SUPPORTED;
		ha->device_type |= DT_FWI2;
		ha->device_type |= DT_IIDMA;
		ha->device_type |= DT_T10_PI;
		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
		break;
	}

	if (IS_QLA82XX(ha))
		ha->port_no = ha->portnum & 1;
	else {
		/* Get adapter physical port no from interrupt pin register. */
		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
		if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha))
			ha->port_no--;
		else
			ha->port_no = !(ha->port_no & 1);
	}

	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
	    ha->device_type, ha->port_no, ha->fw_srisc_address);
}
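
/*
 * Asynchronous SCSI scan hooks: scan_start only flags work for the
 * DPC thread, and the midlayer polls scan_finished until the loop is
 * ready, the reset-delay window expires, or the driver is unloading.
 */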

static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (vha->hw->flags.running_gold_fw)
		return;

	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	set_bit(RSCN_UPDATE, &vha->dpc_flags);
	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}

static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return 1;
	if (!vha->host)
		return 1;
	if (time > vha->hw->loop_reset_delay * HZ)
		return 1;

	return atomic_read(&vha->loop_state) == LOOP_READY;
}
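
/*
 * Heartbeat: periodically issue a no-op mailbox command so a wedged
 * firmware is noticed even on an otherwise idle adapter.
 */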

static void qla_heartbeat_work_fn(struct work_struct *work)
{
	struct qla_hw_data *ha = container_of(work,
		struct qla_hw_data, heartbeat_work);
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (!ha->flags.mbox_busy && base_vha->flags.init_done)
		qla_no_op_mb(base_vha);
}

static void qla2x00_iocb_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(work,
		struct scsi_qla_host, iocb_work);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int i = 2;
	unsigned long flags;

	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return;

	while (!list_empty(&vha->work_list) && i > 0) {
		qla2x00_do_work(vha);
		i--;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
	spin_unlock_irqrestore(&vha->work_lock, flags);
}
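
/*
 * Driver-private trace instance ("qla2xxx"); creation failure is
 * non-fatal and only disables instance logging.
 */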

static void
qla_trace_init(void)
{
	qla_trc_array = trace_array_get_by_name("qla2xxx");
	if (!qla_trc_array) {
		ql_log(ql_log_fatal, NULL, 0x0001,
		    "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
		return;
	}

	QLA_TRACE_ENABLE(qla_trc_array);
}

static void
qla_trace_uninit(void)
{
	if (!qla_trc_array)
		return;
	trace_array_put(qla_trc_array);
}

/*
 * PCI driver interface
 */
static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	struct Scsi_Host *host;
	scsi_qla_host_t *base_vha = NULL;
	struct qla_hw_data *ha;
	char pci_info[30];
	char fw_str[30], wq_name[30];
	struct scsi_host_template *sht;
	int bars, mem_only = 0;
	uint16_t req_length = 0, rsp_length = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	int i;

	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
	sht = &qla2xxx_driver_template;
	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		mem_only = 1;
		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
		    "Mem only adapter.\n");
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
	    "Bars=%d.\n", bars);

	if (mem_only) {
		if (pci_enable_device_mem(pdev))
			return ret;
	} else {
		if (pci_enable_device(pdev))
			return ret;
	}

	if (is_kdump_kernel()) {
		ql2xmqsupport = 0;
		ql2xallocfwdump = 0;
	}

	/* This may fail but that's ok */
	pci_enable_pcie_error_reporting(pdev);

	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
	if (!ha) {
		ql_log_pci(ql_log_fatal, pdev, 0x0009,
		    "Unable to allocate memory for ha.\n");
		goto disable_device;
	}
	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
	    "Memory allocated for ha=%p.\n", ha);
	ha->pdev = pdev;
	INIT_LIST_HEAD(&ha->tgt.q_full_list);
	spin_lock_init(&ha->tgt.q_full_lock);
	spin_lock_init(&ha->tgt.sess_lock);
	spin_lock_init(&ha->tgt.atio_lock);
	spin_lock_init(&ha->sadb_lock);
	INIT_LIST_HEAD(&ha->sadb_tx_index_list);
	INIT_LIST_HEAD(&ha->sadb_rx_index_list);
	spin_lock_init(&ha->sadb_fp_lock);

	if (qla_edif_sadb_build_free_pool(ha)) {
		kfree(ha);
		goto disable_device;
	}

	atomic_set(&ha->nvme_active_aen_cnt, 0);

	/* Clear our data area */
	ha->bars = bars;
	ha->mem_only = mem_only;
	spin_lock_init(&ha->hardware_lock);
	spin_lock_init(&ha->vport_slock);
	mutex_init(&ha->selflogin_lock);
	mutex_init(&ha->optrom_mutex);

	/* Set ISP-type information. */
	qla2x00_set_isp_flags(ha);

	/* Set EEH reset type to fundamental if required by hba */
	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
	    IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		pdev->needs_freset = 1;

	ha->prev_topology = 0;
	ha->init_cb_size = sizeof(init_cb_t);
	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	ha->optrom_size = OPTROM_SIZE_2300;
	ha->max_exchg = FW_MAX_EXCHANGES_CNT;
	atomic_set(&ha->num_pend_mbx_stage1, 0);
	atomic_set(&ha->num_pend_mbx_stage2, 0);
	atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
	ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
	INIT_LIST_HEAD(&ha->tmf_pending);
	INIT_LIST_HEAD(&ha->tmf_active);

	/* Assign ISP specific operations. */
	if (IS_QLA2100(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
		req_length = REQUEST_ENTRY_CNT_2100;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA2200(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2100;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
		ha->gid_list_info_size = 4;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2100_isp_ops;
	} else if (IS_QLA23XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_2200;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->gid_list_info_size = 6;
		if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->optrom_size = OPTROM_SIZE_2322;
		ha->flash_conf_off = ~0;
		ha->flash_data_off = ~0;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
		ha->isp_ops = &qla2300_isp_ops;
	} else if (IS_QLA24XX_TYPE(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_24XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
		ha->isp_ops = &qla24xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA25XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_25XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla25xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA81XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_24XX;
		rsp_length = RESPONSE_ENTRY_CNT_2300;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_81XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla81xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLA82XX(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_82XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla82xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA8044(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_82XX;
		rsp_length = RESPONSE_ENTRY_CNT_82XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla8044_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
	} else if (IS_QLA83XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla83xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLAFX00(ha)) {
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
		ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
		req_length = REQUEST_ENTRY_CNT_FX00;
		rsp_length = RESPONSE_ENTRY_CNT_FX00;
		ha->isp_ops = &qlafx00_isp_ops;
		ha->port_down_retry_count = 30; /* default value */
		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
		ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
		ha->mr.fw_hbt_en = 1;
		ha->mr.host_info_resend = false;
		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
	} else if (IS_QLA27XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_83XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla27xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	} else if (IS_QLA28XX(ha)) {
		ha->portnum = PCI_FUNC(ha->pdev->devfn);
		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
		ha->mbx_count = MAILBOX_REGISTER_COUNT;
		req_length = REQUEST_ENTRY_CNT_83XX;
		rsp_length = RESPONSE_ENTRY_CNT_83XX;
		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
		ha->gid_list_info_size = 8;
		ha->optrom_size = OPTROM_SIZE_28XX;
		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
		ha->isp_ops = &qla27xx_isp_ops;
		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
		ha->nvram_conf_off = ~0;
		ha->nvram_data_off = ~0;
	}
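
	/*
	 * At this point the per-ISP chain above has fixed the mailbox
	 * count, ring sizes, init-CB size and flash/NVRAM offsets that
	 * the rest of probe (and the isp_ops callbacks) rely on.
	 */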
	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
	    "mbx_count=%d, req_length=%d, "
	    "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
	    "max_fibre_devices=%d.\n",
	    ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
	    ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
	    ha->nvram_npiv_size, ha->max_fibre_devices);
	ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
	    "isp_ops=%p, flash_conf_off=%d, "
	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
	    ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
	    ha->nvram_conf_off, ha->nvram_data_off);

	/* Configure PCI I/O space */
	ret = ha->isp_ops->iospace_config(ha);
	if (ret)
		goto iospace_config_failed;

	ql_log_pci(ql_log_info, pdev, 0x001d,
	    "Found an ISP%04X irq %d iobase 0x%p.\n",
	    pdev->device, pdev->irq, ha->iobase);
	mutex_init(&ha->vport_lock);
	mutex_init(&ha->mq_lock);
	init_completion(&ha->mbx_cmd_comp);
	complete(&ha->mbx_cmd_comp);
	init_completion(&ha->mbx_intr_comp);
	init_completion(&ha->dcbx_comp);
	init_completion(&ha->lb_portup_comp);

	set_bit(0, (unsigned long *) ha->vp_idx_map);

	qla2x00_config_dma_addressing(ha);
	ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
	    "64 Bit addressing is %s.\n",
	    ha->flags.enable_64bit_addressing ? "enable" :
	    "disable");
	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
	if (ret) {
		ql_log_pci(ql_log_fatal, pdev, 0x0031,
		    "Failed to allocate memory for adapter, aborting.\n");
		goto probe_hw_failed;
	}

	req->max_q_depth = MAX_Q_DEPTH;
	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
		req->max_q_depth = ql2xmaxqdepth;

	base_vha = qla2x00_create_host(sht, ha);
	if (!base_vha) {
		ret = -ENOMEM;
		goto probe_hw_failed;
	}

	pci_set_drvdata(pdev, base_vha);
	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);

	host = base_vha->host;
	base_vha->req = req;
	if (IS_QLA2XXX_MIDTYPE(ha))
		base_vha->mgmt_svr_loop_id =
			qla2x00_reserve_mgmt_server_loop_id(base_vha);
	else
		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
			base_vha->vp_idx;

	/* Setup fcport template structure. */
	ha->mr.fcport.vha = base_vha;
	ha->mr.fcport.port_type = FCT_UNKNOWN;
	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
	ha->mr.fcport.scan_state = 1;

	qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
			    QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
			    QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);

	/* Set the SG table size based on ISP type */
	if (!IS_FWI2_CAPABLE(ha)) {
		if (IS_QLA2100(ha))
			host->sg_tablesize = 32;
	} else {
		if (!IS_QLA82XX(ha))
			host->sg_tablesize = QLA_SG_ALL;
	}
	host->max_id = ha->max_fibre_devices;
	host->cmd_per_lun = 3;
	host->unique_id = host->host_no;

	if (ql2xenabledif && ql2xenabledif != 2) {
		ql_log(ql_log_warn, base_vha, 0x302d,
		    "Invalid value for ql2xenabledif, resetting it to default (2)\n");
		ql2xenabledif = 2;
	}

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	/* Older HBAs support only 16-bit LUNs */
	if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
	    ql2xmaxlun > 0xffff)
		host->max_lun = 0xffff;
	else
		host->max_lun = ql2xmaxlun;
	host->transportt = qla2xxx_transport_template;
	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);

	ql_dbg(ql_dbg_init, base_vha, 0x0033,
	    "max_id=%d this_id=%d "
	    "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
	    "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
	    host->this_id, host->cmd_per_lun, host->unique_id,
	    host->max_cmd_len, host->max_channel, host->max_lun,
	    host->transportt, sht->vendor_id);

	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
	INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);

	/* Set up the irqs */
	ret = qla2x00_request_irqs(ha, rsp);
	if (ret)
		goto probe_failed;

	/* Alloc arrays of request and response ring ptrs */
	ret = qla2x00_alloc_queues(ha, req, rsp);
	if (ret) {
		ql_log(ql_log_fatal, base_vha, 0x003d,
		    "Failed to allocate memory for queue pointers..."
		    "aborting.\n");
		ret = -ENODEV;
		goto probe_failed;
	}

	if (ha->mqenable) {
		/* number of hardware queues supported by blk/scsi-mq */
		host->nr_hw_queues = ha->max_qpairs;

		ql_dbg(ql_dbg_init, base_vha, 0x0192,
		    "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
	} else {
		if (ql2xnvmeenable) {
			host->nr_hw_queues = ha->max_qpairs;
			ql_dbg(ql_dbg_init, base_vha, 0x0194,
			    "FC-NVMe support is enabled, HW queues=%d\n",
			    host->nr_hw_queues);
		} else {
			ql_dbg(ql_dbg_init, base_vha, 0x0193,
			    "blk/scsi-mq disabled.\n");
		}
	}

	qlt_probe_one_stage1(base_vha, ha);

	pci_save_state(pdev);

	/* Assign back pointers */
	rsp->req = req;
	req->rsp = rsp;

	if (IS_QLAFX00(ha)) {
		ha->rsp_q_map[0] = rsp;
		ha->req_q_map[0] = req;
		set_bit(0, ha->req_qid_map);
		set_bit(0, ha->rsp_qid_map);
	}

	/* FWI2-capable only. */
	req->req_q_in = &ha->iobase->isp24.req_q_in;
	req->req_q_out = &ha->iobase->isp24.req_q_out;
	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
		rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
	}

	if (IS_QLAFX00(ha)) {
		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
		rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
		rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
	}

	if (IS_P3P_TYPE(ha)) {
		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
	}
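
	/*
	 * The FWI2 defaults assigned above are deliberately overridden
	 * for multiqueue, ISPFX00 and P3P parts, whose queue-index
	 * registers live in different register blocks.
	 */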
	ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
	    "req->req_q_in=%p req->req_q_out=%p "
	    "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out,
	    rsp->rsp_q_in, rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x003e,
	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
	ql_dbg(ql_dbg_init, base_vha, 0x003f,
	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);

	ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
	if (unlikely(!ha->wq)) {
		ret = -ENOMEM;
		goto probe_failed;
	}

	if (ha->isp_ops->initialize_adapter(base_vha)) {
		ql_log(ql_log_fatal, base_vha, 0x00d6,
		    "Failed to initialize adapter - Adapter flags %x.\n",
		    base_vha->device_flags);

		if (IS_QLA82XX(ha)) {
			qla82xx_idc_lock(ha);
			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
			qla82xx_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x00d7,
			    "HW State: FAILED.\n");
		} else if (IS_QLA8044(ha)) {
			qla8044_idc_lock(ha);
			qla8044_wr_direct(base_vha,
			    QLA8044_CRB_DEV_STATE_INDEX,
			    QLA8XXX_DEV_FAILED);
			qla8044_idc_unlock(ha);
			ql_log(ql_log_fatal, base_vha, 0x0150,
			    "HW State: FAILED.\n");
		}

		ret = -ENODEV;
		goto probe_failed;
	}

	if (IS_QLAFX00(ha))
		host->can_queue = QLAFX00_MAX_CANQUEUE;
	else
		host->can_queue = req->num_outstanding_cmds - 10;

	ql_dbg(ql_dbg_init, base_vha, 0x0032,
	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
	    host->can_queue, base_vha->req,
	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);

	/* Check if FW supports MQ or not for ISP25xx */
	if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6))
		ha->mqenable = 0;

	if (ha->mqenable) {
		bool startit = false;

		if (QLA_TGT_MODE_ENABLED())
			startit = false;

		if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
			startit = true;

		/* Create start of day qpairs for Block MQ */
		for (i = 0; i < ha->max_qpairs; i++)
			qla2xxx_create_qpair(base_vha, 5, 0, startit);
	}
	qla_init_iocb_limit(base_vha);

	if (ha->flags.running_gold_fw)
		goto skip_dpc;

	/*
	 * Startup the kernel thread for this host adapter
	 */
	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
	    "%s_dpc", base_vha->host_str);
	if (IS_ERR(ha->dpc_thread)) {
		ql_log(ql_log_fatal, base_vha, 0x00ed,
		    "Failed to start DPC thread.\n");
		ret = PTR_ERR(ha->dpc_thread);
		ha->dpc_thread = NULL;
		goto probe_failed;
	}
	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
	    "DPC thread started successfully.\n");

	/*
	 * If we're not coming up in initiator mode, we might sit for
	 * a while without waking up the dpc thread, which leads to a
	 * stuck process warning.  So just kick the dpc once here and
	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
	 */
	qla2xxx_wake_dpc(base_vha);

	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);

	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);

		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
		INIT_WORK(&ha->idc_state_handler,
		    qla83xx_idc_state_handler_work);
		INIT_WORK(&ha->nic_core_unrecoverable,
		    qla83xx_nic_core_unrecoverable_work);
	}

skip_dpc:
	list_add_tail(&base_vha->list, &ha->vp_list);
	base_vha->host->irq = ha->pdev->irq;

	/* Initialized the timer */
	qla2x00_start_timer(base_vha, WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
	    "Started qla2x00_timer with "
	    "interval=%d.\n", WATCH_INTERVAL);
	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
	    "Detected hba at address=%p.\n",
	    ha);

	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
		if (ha->fw_attributes & BIT_4) {
			int prot = 0, guard;

			base_vha->flags.difdix_supported = 1;
			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
			    "Registering for DIF/DIX type 1 and 3 protection.\n");
			if (ql2xprotmask)
				scsi_host_set_prot(host, ql2xprotmask);
			else
				scsi_host_set_prot(host,
				    prot | SHOST_DIF_TYPE1_PROTECTION
				    | SHOST_DIF_TYPE2_PROTECTION
				    | SHOST_DIF_TYPE3_PROTECTION
				    | SHOST_DIX_TYPE1_PROTECTION
				    | SHOST_DIX_TYPE2_PROTECTION
				    | SHOST_DIX_TYPE3_PROTECTION);

			guard = SHOST_DIX_GUARD_CRC;

			if (IS_PI_IPGUARD_CAPABLE(ha) &&
			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
				guard |= SHOST_DIX_GUARD_IP;

			if (ql2xprotguard)
				scsi_host_set_guard(host, ql2xprotguard);
			else
				scsi_host_set_guard(host, guard);
		} else
			base_vha->flags.difdix_supported = 0;
	}

	ha->isp_ops->enable_intrs(ha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
		    QLA_SG_ALL : 128;
	}

	ret = scsi_add_host(host, &pdev->dev);
	if (ret)
		goto probe_failed;

	base_vha->flags.init_done = 1;
	base_vha->flags.online = 1;
	ha->prev_minidump_failed = 0;

	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
	    "Init done and hba is online.\n");

	if (qla_ini_mode_enabled(base_vha) ||
	    qla_dual_mode_enabled(base_vha))
		scsi_scan_host(host);
	else
		ql_log(ql_log_info, base_vha, 0x0122,
		    "skipping scsi_scan_host() for non-initiator port\n");

	qla2x00_alloc_sysfs_attr(base_vha);

	if (IS_QLAFX00(ha)) {
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);

		/* Register system information */
		ret = qlafx00_fx_disc(base_vha,
		    &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
	}

	qla2x00_init_host_attr(base_vha);

	qla2x00_dfs_setup(base_vha);

	ql_log(ql_log_info, base_vha, 0x00fb,
	    "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
	ql_log(ql_log_info, base_vha, 0x00fc,
	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
	    sizeof(pci_info)),
	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
	    base_vha->host_no,
	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));

	qlt_add_target(ha, base_vha);

	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);

	if (test_bit(UNLOADING, &base_vha->dpc_flags))
		return -ENODEV;

	return 0;

probe_failed:
	qla_enode_stop(base_vha);
	qla_edb_stop(base_vha);
	vfree(base_vha->scan.l);
	if (base_vha->gnl.l) {
		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
		    base_vha->gnl.l, base_vha->gnl.ldma);
		base_vha->gnl.l = NULL;
	}

	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);
	base_vha->flags.online = 0;
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		ha->dpc_thread = NULL;
		kthread_stop(t);
	}

	qla2x00_free_device(base_vha);
	scsi_host_put(base_vha->host);
	/*
	 * Need to NULL out local req/rsp after
	 * qla2x00_free_device => qla2x00_free_queues frees
	 * what these are pointing to. Or else we'll
	 * fall over below in qla2x00_free_req/rsp_que.
	 */
	req = NULL;
	rsp = NULL;

probe_hw_failed:
	qla2x00_mem_free(ha);
	qla2x00_free_req_que(ha, req);
	qla2x00_free_rsp_que(ha, rsp);
	qla2x00_clear_drv_active(ha);

iospace_config_failed:
	if (IS_P3P_TYPE(ha)) {
		/*
		 * Unmap only what iospace_config actually mapped; the
		 * original check here was inverted (!ha->nx_pcibase).
		 */
		if (ha->nx_pcibase)
			iounmap((device_reg_t *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);
		if (ha->cregbase)
			iounmap(ha->cregbase);
	}
	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);

disable_device:
	pci_disable_device(pdev);
	return ret;
}

static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
{
	scsi_qla_host_t *vp;
	unsigned long flags;
	struct qla_hw_data *ha;

	if (!base_vha)
		return;

	ha = base_vha->hw;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list)
		set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);

	/*
	 * Indicate device removal to prevent future board_disable
	 * and wait until any pending board_disable has completed.
	 */
	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

static void
qla2x00_shutdown(struct pci_dev *pdev)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;

	vha = pci_get_drvdata(pdev);
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0xfffa,
	    "Adapter shutdown\n");

	/*
	 * Prevent future board_disable and wait
	 * until any pending board_disable has completed.
	 */
	__qla_set_remove_flag(vha);
	cancel_work_sync(&ha->board_disable);

	if (!atomic_read(&pdev->enable_cnt))
		return;

	/* Notify ISPFX00 firmware */
	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(vha, 20);

	/* Turn-off FCE trace */
	if (ha->flags.fce_enabled) {
		qla2x00_disable_fce_trace(vha, NULL, NULL);
		ha->flags.fce_enabled = 0;
	}

	/* Turn-off EFT trace */
	if (ha->eft)
		qla2x00_disable_eft_trace(vha);

	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		if (ha->flags.fw_started)
			qla2x00_abort_isp_cleanup(vha);
	} else {
		/* Stop currently executing firmware. */
		qla2x00_try_to_stop_firmware(vha);
	}

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	/* Turn adapter off line */
	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_irqs(vha);

	qla2x00_free_fw_dump(ha);

	pci_disable_device(pdev);
	ql_log(ql_log_info, vha, 0xfffe,
	    "Adapter shutdown successfully.\n");
}

/* Deletes all the virtual ports for a given ha */
static void
qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
{
	scsi_qla_host_t *vha;
	unsigned long flags;

	mutex_lock(&ha->vport_lock);
	while (ha->cur_vport_count) {
		spin_lock_irqsave(&ha->vport_slock, flags);

		BUG_ON(base_vha->list.next == &ha->vp_list);
		/* This assumes first entry in ha->vp_list is always base vha */
		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
		scsi_host_get(vha->host);

		spin_unlock_irqrestore(&ha->vport_slock, flags);
		mutex_unlock(&ha->vport_lock);

		qla_nvme_delete(vha);

		fc_vport_terminate(vha->fc_vport);
		scsi_host_put(vha->host);

		mutex_lock(&ha->vport_lock);
	}
	mutex_unlock(&ha->vport_lock);
}

/* Stops all deferred work threads */
static void
qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
{
	/* Cancel all work and destroy DPC workqueues */
	if (ha->dpc_lp_wq) {
		cancel_work_sync(&ha->idc_aen);
		destroy_workqueue(ha->dpc_lp_wq);
		ha->dpc_lp_wq = NULL;
	}

	if (ha->dpc_hp_wq) {
		cancel_work_sync(&ha->nic_core_reset);
		cancel_work_sync(&ha->idc_state_handler);
		cancel_work_sync(&ha->nic_core_unrecoverable);
		destroy_workqueue(ha->dpc_hp_wq);
		ha->dpc_hp_wq = NULL;
	}

	/* Kill the kernel thread for this host */
	if (ha->dpc_thread) {
		struct task_struct *t = ha->dpc_thread;

		/*
		 * qla2xxx_wake_dpc checks for ->dpc_thread
		 * so we need to zero it out.
		 */
		ha->dpc_thread = NULL;
		kthread_stop(t);
	}
}

static void
qla2x00_unmap_iobases(struct qla_hw_data *ha)
{
	if (IS_QLA82XX(ha)) {
		iounmap((device_reg_t *)ha->nx_pcibase);
		if (!ql2xdbwr)
			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
	} else {
		if (ha->iobase)
			iounmap(ha->iobase);

		if (ha->cregbase)
			iounmap(ha->cregbase);

		if (ha->mqiobase)
			iounmap(ha->mqiobase);

		if (ha->msixbase)
			iounmap(ha->msixbase);
	}
}

static void
qla2x00_clear_drv_active(struct qla_hw_data *ha)
{
	if (IS_QLA8044(ha)) {
		qla8044_idc_lock(ha);
		qla8044_clear_drv_active(ha);
		qla8044_idc_unlock(ha);
	} else if (IS_QLA82XX(ha)) {
		qla82xx_idc_lock(ha);
		qla82xx_clear_drv_active(ha);
		qla82xx_idc_unlock(ha);
	}
}
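
/*
 * Teardown mirrors probe in reverse: quiesce firmware and sessions,
 * free per-vha resources, then release queues, iobases and PCI
 * regions.
 */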

static void
qla2x00_remove_one(struct pci_dev *pdev)
{
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;

	base_vha = pci_get_drvdata(pdev);
	ha = base_vha->hw;
	ql_log(ql_log_info, base_vha, 0xb079,
	    "Removing driver\n");
	__qla_set_remove_flag(base_vha);
	cancel_work_sync(&ha->board_disable);

	/*
	 * If the PCI device is disabled then there was a PCI-disconnect and
	 * qla2x00_disable_board_on_pci_error has taken care of most of the
	 * resources.
	 */
	if (!atomic_read(&pdev->enable_cnt)) {
		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
		    base_vha->gnl.l, base_vha->gnl.ldma);
		base_vha->gnl.l = NULL;
		scsi_host_put(base_vha->host);
		kfree(ha);
		pci_set_drvdata(pdev, NULL);
		return;
	}
	qla2x00_wait_for_hba_ready(base_vha);

	/*
	 * if UNLOADING flag is already set, then continue unload,
	 * where it was set first.
	 */
	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
		return;

	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		if (ha->flags.fw_started)
			qla2x00_abort_isp_cleanup(base_vha);
	} else if (!IS_QLAFX00(ha)) {
		if (IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
			    "Clearing fcoe driver presence.\n");
			if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
				ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
				    "Error while clearing DRV-Presence.\n");
		}

		qla2x00_try_to_stop_firmware(base_vha);
	}

	qla2x00_wait_for_sess_deletion(base_vha);

	qla_nvme_delete(base_vha);

	dma_free_coherent(&ha->pdev->dev,
	    base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);

	base_vha->gnl.l = NULL;
	qla_enode_stop(base_vha);
	qla_edb_stop(base_vha);

	vfree(base_vha->scan.l);

	if (IS_QLAFX00(ha))
		qlafx00_driver_shutdown(base_vha, 20);

	qla2x00_delete_all_vps(ha, base_vha);

	qla2x00_dfs_remove(base_vha);

	qla84xx_put_chip(base_vha);

	/* Disable timer */
	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);

	base_vha->flags.online = 0;

	/* free DMA memory */
	if (ha->exlogin_buf)
		qla2x00_free_exlogin_buffer(ha);

	/* free DMA memory */
	if (ha->exchoffld_buf)
		qla2x00_free_exchoffld_buffer(ha);

	qla2x00_destroy_deferred_work(ha);

	qlt_remove_target(ha, base_vha);

	qla2x00_free_sysfs_attr(base_vha, true);

	fc_remove_host(base_vha->host);

	scsi_remove_host(base_vha->host);

	qla2x00_free_device(base_vha);

	qla2x00_clear_drv_active(ha);

	scsi_host_put(base_vha->host);

	qla2x00_unmap_iobases(ha);

	pci_release_selected_regions(ha->pdev, ha->bars);
	kfree(ha);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static inline void
qla24xx_free_purex_list(struct purex_list *list)
{
	struct purex_item *item, *next;
	ulong flags;

	spin_lock_irqsave(&list->lock, flags);
	list_for_each_entry_safe(item, next, &list->head, list) {
		list_del(&item->list);
		if (item == &item->vha->default_item)
			continue;
		kfree(item);
	}
	spin_unlock_irqrestore(&list->lock, flags);
}

static void
qla2x00_free_device(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);

	/* Disable timer */
	if (vha->timer_active)
		qla2x00_stop_timer(vha);

	qla25xx_delete_queues(vha);
	vha->flags.online = 0;

	/* turn-off interrupts on the card */
	if (ha->interrupts_on) {
		vha->flags.init_done = 0;
		ha->isp_ops->disable_intrs(ha);
	}

	qla2x00_free_fcports(vha);

	qla2x00_free_irqs(vha);

	/* Flush the work queue and remove it */
	if (ha->wq) {
		destroy_workqueue(ha->wq);
		ha->wq = NULL;
	}

	qla24xx_free_purex_list(&vha->purex_list);

	qla2x00_mem_free(ha);

	qla82xx_md_free(vha);

	qla_edif_sadb_release_free_pool(ha);
	qla_edif_sadb_release(ha);

	qla2x00_free_queues(ha);
}

void qla2x00_free_fcports(struct scsi_qla_host *vha)
{
	fc_port_t *fcport, *tfcport;

	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
		qla2x00_free_fcport(fcport);
}

static inline void
qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	int now;

	/* The early return makes a second rport check redundant. */
	if (!fcport->rport)
		return;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
	    "%s %8phN. rport %p roles %x\n",
	    __func__, fcport->port_name, fcport->rport,
	    fcport->rport->roles);
	fc_remote_port_delete(fcport->rport);

	qlt_do_generation_tick(vha, &now);
}

/*
 * qla2x00_mark_device_lost - Updates fcport state when device goes offline.
 *
 * Input: ha = adapter block pointer.  fcport = port structure pointer.
 *
 * Return: None.
 *
 * Context:
 */
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
    int do_login)
{
	if (IS_QLAFX00(vha->hw)) {
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport);
		return;
	}

	if (atomic_read(&fcport->state) == FCS_ONLINE &&
	    vha->vp_idx == fcport->vha->vp_idx) {
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
		qla2x00_schedule_rport_del(vha, fcport);
	}

	/*
	 * We may need to retry the login, so don't change the state of the
	 * port but do the retries.
	 */
	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);

	if (!do_login)
		return;

	set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}

void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20f1,
	    "Mark all dev lost\n");

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (ql2xfc2target &&
		    fcport->loop_id != FC_NO_LOOP_ID &&
		    (fcport->flags & FCF_FCP2_DEVICE) &&
		    fcport->port_type == FCT_TARGET &&
		    !qla2x00_reset_active(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x211a,
			    "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
			    fcport->flags, fcport->port_type,
			    fcport->d_id.b24, fcport->port_name);
			continue;
		}
		fcport->scan_state = 0;
		qlt_schedule_sess_for_deletion(fcport);
	}
}
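
/*
 * Pre-FWI2 parts use fixed loop IDs; reserve the well-known ones in
 * loop_id_map so the loop-ID allocator never hands them to fabric
 * ports.
 */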
  3594. static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
  3595. {
  3596. int i;
  3597. if (IS_FWI2_CAPABLE(ha))
  3598. return;
  3599. for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
  3600. set_bit(i, ha->loop_id_map);
  3601. set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
  3602. set_bit(BROADCAST, ha->loop_id_map);
  3603. }
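/*
 * Illustrative sketch (not part of the driver): with the reserved IDs
 * above set, a free loop ID could be claimed by scanning the bitmap for
 * the first clear bit. The helper name qla_example_claim_loop_id() is
 * hypothetical; the driver's real allocator lives elsewhere.
 *
 *	static int qla_example_claim_loop_id(struct qla_hw_data *ha)
 *	{
 *		int id = find_first_zero_bit(ha->loop_id_map,
 *					     LOOPID_MAP_SIZE);
 *
 *		if (id >= LOOPID_MAP_SIZE)
 *			return -ENOSPC;
 *		set_bit(id, ha->loop_id_map);
 *		return id;
 *	}
 */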
  3604. /*
  3605. * qla2x00_mem_alloc
  3606. * Allocates adapter memory.
  3607. *
  3608. * Returns:
  3609. * 0 = success.
  3610. * !0 = failure.
  3611. */
  3612. static int
  3613. qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
  3614. struct req_que **req, struct rsp_que **rsp)
  3615. {
  3616. char name[16];
  3617. int rc;
  3618. ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
  3619. &ha->init_cb_dma, GFP_KERNEL);
  3620. if (!ha->init_cb)
  3621. goto fail;
  3622. rc = btree_init32(&ha->host_map);
  3623. if (rc)
  3624. goto fail_free_init_cb;
  3625. if (qlt_mem_alloc(ha) < 0)
  3626. goto fail_free_btree;
  3627. ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
  3628. qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
  3629. if (!ha->gid_list)
  3630. goto fail_free_tgt_mem;
  3631. ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
  3632. if (!ha->srb_mempool)
  3633. goto fail_free_gid_list;
  3634. if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
  3635. /* Allocate cache for CT6 Ctx. */
  3636. if (!ctx_cachep) {
  3637. ctx_cachep = kmem_cache_create("qla2xxx_ctx",
  3638. sizeof(struct ct6_dsd), 0,
  3639. SLAB_HWCACHE_ALIGN, NULL);
  3640. if (!ctx_cachep)
  3641. goto fail_free_srb_mempool;
  3642. }
  3643. ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
  3644. ctx_cachep);
  3645. if (!ha->ctx_mempool)
  3646. goto fail_free_srb_mempool;
  3647. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
  3648. "ctx_cachep=%p ctx_mempool=%p.\n",
  3649. ctx_cachep, ha->ctx_mempool);
  3650. }
  3651. /* Get memory for cached NVRAM */
  3652. ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
  3653. if (!ha->nvram)
  3654. goto fail_free_ctx_mempool;
  3655. snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
  3656. ha->pdev->device);
  3657. ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
  3658. DMA_POOL_SIZE, 8, 0);
  3659. if (!ha->s_dma_pool)
  3660. goto fail_free_nvram;
  3661. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
  3662. "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
  3663. ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
  3664. if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
  3665. ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
  3666. DSD_LIST_DMA_POOL_SIZE, 8, 0);
  3667. if (!ha->dl_dma_pool) {
  3668. ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
  3669. "Failed to allocate memory for dl_dma_pool.\n");
  3670. goto fail_s_dma_pool;
  3671. }
  3672. ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
  3673. FCP_CMND_DMA_POOL_SIZE, 8, 0);
  3674. if (!ha->fcp_cmnd_dma_pool) {
  3675. ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
  3676. "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
  3677. goto fail_dl_dma_pool;
  3678. }
  3679. if (ql2xenabledif) {
  3680. u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
  3681. struct dsd_dma *dsd, *nxt;
  3682. uint i;
/* Create a DMA pool of buffers for DIF bundling */
  3684. ha->dif_bundl_pool = dma_pool_create(name,
  3685. &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
  3686. if (!ha->dif_bundl_pool) {
  3687. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
  3688. "%s: failed create dif_bundl_pool\n",
  3689. __func__);
  3690. goto fail_dif_bundl_dma_pool;
  3691. }
  3692. INIT_LIST_HEAD(&ha->pool.good.head);
  3693. INIT_LIST_HEAD(&ha->pool.unusable.head);
  3694. ha->pool.good.count = 0;
  3695. ha->pool.unusable.count = 0;
  3696. for (i = 0; i < 128; i++) {
  3697. dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
  3698. if (!dsd) {
  3699. ql_dbg_pci(ql_dbg_init, ha->pdev,
  3700. 0xe0ee, "%s: failed alloc dsd\n",
  3701. __func__);
/* unwind via the common error path instead of leaking the pool */
goto fail_dma_pool;
  3703. }
  3704. ha->dif_bundle_kallocs++;
  3705. dsd->dsd_addr = dma_pool_alloc(
  3706. ha->dif_bundl_pool, GFP_ATOMIC,
  3707. &dsd->dsd_list_dma);
  3708. if (!dsd->dsd_addr) {
  3709. ql_dbg_pci(ql_dbg_init, ha->pdev,
  3710. 0xe0ee,
  3711. "%s: failed alloc ->dsd_addr\n",
  3712. __func__);
  3713. kfree(dsd);
  3714. ha->dif_bundle_kallocs--;
  3715. continue;
  3716. }
  3717. ha->dif_bundle_dma_allocs++;
/*
 * if the DMA buffer crosses a 4G boundary,
 * put it on the unusable list
 */
  3722. if (MSD(dsd->dsd_list_dma) ^
  3723. MSD(dsd->dsd_list_dma + bufsize)) {
  3724. list_add_tail(&dsd->list,
  3725. &ha->pool.unusable.head);
  3726. ha->pool.unusable.count++;
  3727. } else {
  3728. list_add_tail(&dsd->list,
  3729. &ha->pool.good.head);
  3730. ha->pool.good.count++;
  3731. }
  3732. }
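/*
 * Worked example of the MSD() test above (a sketch, assuming MSD()
 * yields the most significant 32 bits of the 64-bit DMA address and an
 * illustrative bufsize of 0x2000): a buffer starting at DMA address
 * 0x0_ffff_f000 ends beyond 0x1_0000_0000, so MSD(start) == 0x0 while
 * MSD(start + bufsize) == 0x1; the XOR is non-zero and the buffer lands
 * on the unusable list. A buffer at 0x0_1000_0000 keeps MSD() == 0x0
 * for both ends and is counted as good.
 */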
  3733. /* return the good ones back to the pool */
  3734. list_for_each_entry_safe(dsd, nxt,
  3735. &ha->pool.good.head, list) {
  3736. list_del(&dsd->list);
  3737. dma_pool_free(ha->dif_bundl_pool,
  3738. dsd->dsd_addr, dsd->dsd_list_dma);
  3739. ha->dif_bundle_dma_allocs--;
  3740. kfree(dsd);
  3741. ha->dif_bundle_kallocs--;
  3742. }
  3743. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
  3744. "%s: dif dma pool (good=%u unusable=%u)\n",
  3745. __func__, ha->pool.good.count,
  3746. ha->pool.unusable.count);
  3747. }
  3748. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
  3749. "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
  3750. ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
  3751. ha->dif_bundl_pool);
  3752. }
  3753. /* Allocate memory for SNS commands */
  3754. if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
  3755. /* Get consistent memory allocated for SNS commands */
  3756. ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
  3757. sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
  3758. if (!ha->sns_cmd)
  3759. goto fail_dma_pool;
  3760. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
  3761. "sns_cmd: %p.\n", ha->sns_cmd);
  3762. } else {
  3763. /* Get consistent memory allocated for MS IOCB */
  3764. ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
  3765. &ha->ms_iocb_dma);
  3766. if (!ha->ms_iocb)
  3767. goto fail_dma_pool;
  3768. /* Get consistent memory allocated for CT SNS commands */
  3769. ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
  3770. sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
  3771. if (!ha->ct_sns)
  3772. goto fail_free_ms_iocb;
  3773. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
  3774. "ms_iocb=%p ct_sns=%p.\n",
  3775. ha->ms_iocb, ha->ct_sns);
  3776. }
  3777. /* Allocate memory for request ring */
  3778. *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
  3779. if (!*req) {
  3780. ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
  3781. "Failed to allocate memory for req.\n");
  3782. goto fail_req;
  3783. }
  3784. (*req)->length = req_len;
  3785. (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
  3786. ((*req)->length + 1) * sizeof(request_t),
  3787. &(*req)->dma, GFP_KERNEL);
  3788. if (!(*req)->ring) {
  3789. ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
  3790. "Failed to allocate memory for req_ring.\n");
  3791. goto fail_req_ring;
  3792. }
  3793. /* Allocate memory for response ring */
  3794. *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
  3795. if (!*rsp) {
  3796. ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
  3797. "Failed to allocate memory for rsp.\n");
  3798. goto fail_rsp;
  3799. }
  3800. (*rsp)->hw = ha;
  3801. (*rsp)->length = rsp_len;
  3802. (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
  3803. ((*rsp)->length + 1) * sizeof(response_t),
  3804. &(*rsp)->dma, GFP_KERNEL);
  3805. if (!(*rsp)->ring) {
  3806. ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
  3807. "Failed to allocate memory for rsp_ring.\n");
  3808. goto fail_rsp_ring;
  3809. }
  3810. (*req)->rsp = *rsp;
  3811. (*rsp)->req = *req;
  3812. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
  3813. "req=%p req->length=%d req->ring=%p rsp=%p "
  3814. "rsp->length=%d rsp->ring=%p.\n",
  3815. *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
  3816. (*rsp)->ring);
  3817. /* Allocate memory for NVRAM data for vports */
  3818. if (ha->nvram_npiv_size) {
  3819. ha->npiv_info = kcalloc(ha->nvram_npiv_size,
  3820. sizeof(struct qla_npiv_entry),
  3821. GFP_KERNEL);
  3822. if (!ha->npiv_info) {
  3823. ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
  3824. "Failed to allocate memory for npiv_info.\n");
  3825. goto fail_npiv_info;
  3826. }
  3827. } else
  3828. ha->npiv_info = NULL;
  3829. /* Get consistent memory allocated for EX-INIT-CB. */
  3830. if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
  3831. IS_QLA28XX(ha)) {
  3832. ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
  3833. &ha->ex_init_cb_dma);
  3834. if (!ha->ex_init_cb)
  3835. goto fail_ex_init_cb;
  3836. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
  3837. "ex_init_cb=%p.\n", ha->ex_init_cb);
  3838. }
  3839. /* Get consistent memory allocated for Special Features-CB. */
  3840. if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
  3841. ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
  3842. &ha->sf_init_cb_dma);
  3843. if (!ha->sf_init_cb)
  3844. goto fail_sf_init_cb;
  3845. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
  3846. "sf_init_cb=%p.\n", ha->sf_init_cb);
  3847. }
  3848. INIT_LIST_HEAD(&ha->gbl_dsd_list);
  3849. /* Get consistent memory allocated for Async Port-Database. */
  3850. if (!IS_FWI2_CAPABLE(ha)) {
  3851. ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
  3852. &ha->async_pd_dma);
  3853. if (!ha->async_pd)
  3854. goto fail_async_pd;
  3855. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
  3856. "async_pd=%p.\n", ha->async_pd);
  3857. }
  3858. INIT_LIST_HEAD(&ha->vp_list);
  3859. /* Allocate memory for our loop_id bitmap */
  3860. ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
  3861. sizeof(long),
  3862. GFP_KERNEL);
if (!ha->loop_id_map)
goto fail_loop_id_map;
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
"loop_id_map=%p.\n", ha->loop_id_map);
  3870. ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
  3871. SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
  3872. if (!ha->sfp_data) {
  3873. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
  3874. "Unable to allocate memory for SFP read-data.\n");
  3875. goto fail_sfp_data;
  3876. }
  3877. ha->flt = dma_alloc_coherent(&ha->pdev->dev,
  3878. sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
  3879. GFP_KERNEL);
  3880. if (!ha->flt) {
  3881. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
  3882. "Unable to allocate memory for FLT.\n");
  3883. goto fail_flt_buffer;
  3884. }
  3885. /* allocate the purex dma pool */
  3886. ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
  3887. ELS_MAX_PAYLOAD, 8, 0);
  3888. if (!ha->purex_dma_pool) {
  3889. ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
  3890. "Unable to allocate purex_dma_pool.\n");
  3891. goto fail_flt;
  3892. }
  3893. ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
  3894. ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
  3895. ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
  3896. if (!ha->elsrej.c) {
  3897. ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
  3898. "Alloc failed for els reject cmd.\n");
  3899. goto fail_elsrej;
  3900. }
  3901. ha->elsrej.c->er_cmd = ELS_LS_RJT;
  3902. ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
  3903. ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
  3904. return 0;
  3905. fail_elsrej:
  3906. dma_pool_destroy(ha->purex_dma_pool);
  3907. fail_flt:
dma_free_coherent(&ha->pdev->dev,
sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
ha->flt, ha->flt_dma);
  3910. fail_flt_buffer:
  3911. dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
  3912. ha->sfp_data, ha->sfp_data_dma);
  3913. fail_sfp_data:
  3914. kfree(ha->loop_id_map);
  3915. fail_loop_id_map:
  3916. dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
  3917. fail_async_pd:
  3918. dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
  3919. fail_sf_init_cb:
  3920. dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
  3921. fail_ex_init_cb:
  3922. kfree(ha->npiv_info);
  3923. fail_npiv_info:
  3924. dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
  3925. sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
  3926. (*rsp)->ring = NULL;
  3927. (*rsp)->dma = 0;
  3928. fail_rsp_ring:
  3929. kfree(*rsp);
  3930. *rsp = NULL;
  3931. fail_rsp:
  3932. dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
  3933. sizeof(request_t), (*req)->ring, (*req)->dma);
  3934. (*req)->ring = NULL;
  3935. (*req)->dma = 0;
  3936. fail_req_ring:
  3937. kfree(*req);
  3938. *req = NULL;
  3939. fail_req:
  3940. dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
  3941. ha->ct_sns, ha->ct_sns_dma);
  3942. ha->ct_sns = NULL;
  3943. ha->ct_sns_dma = 0;
  3944. fail_free_ms_iocb:
  3945. dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
  3946. ha->ms_iocb = NULL;
  3947. ha->ms_iocb_dma = 0;
  3948. if (ha->sns_cmd)
  3949. dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
  3950. ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
if (ql2xenabledif) {
struct dsd_dma *dsd, *nxt;
list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
list) {
list_del(&dsd->list);
dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
dsd->dsd_list_dma);
ha->dif_bundle_dma_allocs--;
kfree(dsd);
ha->dif_bundle_kallocs--;
ha->pool.unusable.count--;
}
/*
 * The good list is non-empty only when the allocation loop
 * above bailed out early; drain it before destroying the pool.
 */
list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
list_del(&dsd->list);
dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
dsd->dsd_list_dma);
ha->dif_bundle_dma_allocs--;
kfree(dsd);
ha->dif_bundle_kallocs--;
ha->pool.good.count--;
}
dma_pool_destroy(ha->dif_bundl_pool);
ha->dif_bundl_pool = NULL;
}
  3967. fail_dif_bundl_dma_pool:
  3968. if (IS_QLA82XX(ha) || ql2xenabledif) {
  3969. dma_pool_destroy(ha->fcp_cmnd_dma_pool);
  3970. ha->fcp_cmnd_dma_pool = NULL;
  3971. }
  3972. fail_dl_dma_pool:
  3973. if (IS_QLA82XX(ha) || ql2xenabledif) {
  3974. dma_pool_destroy(ha->dl_dma_pool);
  3975. ha->dl_dma_pool = NULL;
  3976. }
  3977. fail_s_dma_pool:
  3978. dma_pool_destroy(ha->s_dma_pool);
  3979. ha->s_dma_pool = NULL;
  3980. fail_free_nvram:
  3981. kfree(ha->nvram);
  3982. ha->nvram = NULL;
  3983. fail_free_ctx_mempool:
  3984. mempool_destroy(ha->ctx_mempool);
  3985. ha->ctx_mempool = NULL;
  3986. fail_free_srb_mempool:
  3987. mempool_destroy(ha->srb_mempool);
  3988. ha->srb_mempool = NULL;
  3989. fail_free_gid_list:
  3990. dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
  3991. ha->gid_list,
  3992. ha->gid_list_dma);
  3993. ha->gid_list = NULL;
  3994. ha->gid_list_dma = 0;
  3995. fail_free_tgt_mem:
  3996. qlt_mem_free(ha);
  3997. fail_free_btree:
  3998. btree_destroy32(&ha->host_map);
  3999. fail_free_init_cb:
  4000. dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
  4001. ha->init_cb_dma);
  4002. ha->init_cb = NULL;
  4003. ha->init_cb_dma = 0;
  4004. fail:
  4005. ql_log(ql_log_fatal, NULL, 0x0030,
  4006. "Memory allocation failure.\n");
  4007. return -ENOMEM;
  4008. }
  4009. int
  4010. qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
  4011. {
  4012. int rval;
  4013. uint16_t size, max_cnt;
  4014. uint32_t temp;
  4015. struct qla_hw_data *ha = vha->hw;
/* Return if we don't need to allocate any extended logins */
  4017. if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
  4018. return QLA_SUCCESS;
  4019. if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
  4020. return QLA_SUCCESS;
  4021. ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
  4022. max_cnt = 0;
  4023. rval = qla_get_exlogin_status(vha, &size, &max_cnt);
  4024. if (rval != QLA_SUCCESS) {
  4025. ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
  4026. "Failed to get exlogin status.\n");
  4027. return rval;
  4028. }
  4029. temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
  4030. temp *= size;
  4031. if (temp != ha->exlogin_size) {
  4032. qla2x00_free_exlogin_buffer(ha);
  4033. ha->exlogin_size = temp;
  4034. ql_log(ql_log_info, vha, 0xd024,
  4035. "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
  4036. max_cnt, size, temp);
  4037. ql_log(ql_log_info, vha, 0xd025,
  4038. "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
  4039. /* Get consistent memory for extended logins */
  4040. ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
  4041. ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
  4042. if (!ha->exlogin_buf) {
  4043. ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
  4044. "Failed to allocate memory for exlogin_buf_dma.\n");
  4045. return -ENOMEM;
  4046. }
  4047. }
  4048. /* Now configure the dma buffer */
  4049. rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
  4050. if (rval) {
  4051. ql_log(ql_log_fatal, vha, 0xd033,
  4052. "Setup extended login buffer ****FAILED****.\n");
  4053. qla2x00_free_exlogin_buffer(ha);
  4054. }
  4055. return rval;
  4056. }
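/*
 * Worked sizing example for the function above (values illustrative
 * only): with ql2xexlogins large enough to pass the early-return checks
 * (say 4096), a firmware-reported max_cnt of 2048 and a per-port
 * database size of 0x100 bytes, temp = 2048 * 0x100 = 0x80000, so a
 * 512 KB coherent buffer is requested on the first pass and reused on
 * later calls for as long as the computed size is unchanged.
 */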
  4057. /*
  4058. * qla2x00_free_exlogin_buffer
  4059. *
  4060. * Input:
  4061. * ha = adapter block pointer
  4062. */
  4063. void
  4064. qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
  4065. {
  4066. if (ha->exlogin_buf) {
  4067. dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
  4068. ha->exlogin_buf, ha->exlogin_buf_dma);
  4069. ha->exlogin_buf = NULL;
  4070. ha->exlogin_size = 0;
  4071. }
  4072. }
  4073. static void
  4074. qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
  4075. {
  4076. u32 temp;
  4077. struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
  4078. *ret_cnt = FW_DEF_EXCHANGES_CNT;
  4079. if (max_cnt > vha->hw->max_exchg)
  4080. max_cnt = vha->hw->max_exchg;
  4081. if (qla_ini_mode_enabled(vha)) {
  4082. if (vha->ql2xiniexchg > max_cnt)
  4083. vha->ql2xiniexchg = max_cnt;
  4084. if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
  4085. *ret_cnt = vha->ql2xiniexchg;
  4086. } else if (qla_tgt_mode_enabled(vha)) {
  4087. if (vha->ql2xexchoffld > max_cnt) {
  4088. vha->ql2xexchoffld = max_cnt;
  4089. icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
  4090. }
  4091. if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
  4092. *ret_cnt = vha->ql2xexchoffld;
  4093. } else if (qla_dual_mode_enabled(vha)) {
  4094. temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
  4095. if (temp > max_cnt) {
  4096. vha->ql2xiniexchg -= (temp - max_cnt)/2;
  4097. vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
  4098. temp = max_cnt;
  4099. icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
  4100. }
  4101. if (temp > FW_DEF_EXCHANGES_CNT)
  4102. *ret_cnt = temp;
  4103. }
  4104. }
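/*
 * Worked example for the dual-mode branch above (numbers illustrative
 * only): with ql2xiniexchg = 4096, ql2xexchoffld = 4096 and
 * max_cnt = 6144, temp = 8192 exceeds max_cnt by 2048, so the initiator
 * share drops by 1024 to 3072 and the offload share by 1025 to 3071;
 * temp is then clamped to 6144 and, if that exceeds
 * FW_DEF_EXCHANGES_CNT, becomes *ret_cnt.
 */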
  4105. int
  4106. qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
  4107. {
  4108. int rval;
  4109. u16 size, max_cnt;
  4110. u32 actual_cnt, totsz;
  4111. struct qla_hw_data *ha = vha->hw;
  4112. if (!ha->flags.exchoffld_enabled)
  4113. return QLA_SUCCESS;
  4114. if (!IS_EXCHG_OFFLD_CAPABLE(ha))
  4115. return QLA_SUCCESS;
  4116. max_cnt = 0;
  4117. rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
  4118. if (rval != QLA_SUCCESS) {
  4119. ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
  4120. "Failed to get exlogin status.\n");
  4121. return rval;
  4122. }
  4123. qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
  4124. ql_log(ql_log_info, vha, 0xd014,
  4125. "Actual exchange offload count: %d.\n", actual_cnt);
  4126. totsz = actual_cnt * size;
  4127. if (totsz != ha->exchoffld_size) {
  4128. qla2x00_free_exchoffld_buffer(ha);
  4129. if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
  4130. ha->exchoffld_size = 0;
  4131. ha->flags.exchoffld_enabled = 0;
  4132. return QLA_SUCCESS;
  4133. }
  4134. ha->exchoffld_size = totsz;
  4135. ql_log(ql_log_info, vha, 0xd016,
  4136. "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
  4137. max_cnt, actual_cnt, size, totsz);
  4138. ql_log(ql_log_info, vha, 0xd017,
  4139. "Exchange Buffers requested size = 0x%x\n",
  4140. ha->exchoffld_size);
/* Get consistent memory for exchange offload */
  4142. ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
  4143. ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
  4144. if (!ha->exchoffld_buf) {
  4145. ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
  4146. "Failed to allocate memory for Exchange Offload.\n");
  4147. if (ha->max_exchg >
  4148. (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
  4149. ha->max_exchg -= REDUCE_EXCHANGES_CNT;
  4150. } else if (ha->max_exchg >
  4151. (FW_DEF_EXCHANGES_CNT + 512)) {
  4152. ha->max_exchg -= 512;
  4153. } else {
  4154. ha->flags.exchoffld_enabled = 0;
  4155. ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
  4156. "Disabling Exchange offload due to lack of memory\n");
  4157. }
  4158. ha->exchoffld_size = 0;
  4159. return -ENOMEM;
  4160. }
  4161. } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
  4162. /* pathological case */
  4163. qla2x00_free_exchoffld_buffer(ha);
  4164. ha->exchoffld_size = 0;
  4165. ha->flags.exchoffld_enabled = 0;
  4166. ql_log(ql_log_info, vha, 0xd016,
  4167. "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
  4168. ha->exchoffld_size, actual_cnt, size, totsz);
  4169. return 0;
  4170. }
  4171. /* Now configure the dma buffer */
  4172. rval = qla_set_exchoffld_mem_cfg(vha);
  4173. if (rval) {
  4174. ql_log(ql_log_fatal, vha, 0xd02e,
  4175. "Setup exchange offload buffer ****FAILED****.\n");
  4176. qla2x00_free_exchoffld_buffer(ha);
  4177. } else {
  4178. /* re-adjust number of target exchange */
  4179. struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
  4180. if (qla_ini_mode_enabled(vha))
  4181. icb->exchange_count = 0;
  4182. else
  4183. icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
  4184. }
  4185. return rval;
  4186. }
  4187. /*
  4188. * qla2x00_free_exchoffld_buffer
  4189. *
  4190. * Input:
  4191. * ha = adapter block pointer
  4192. */
  4193. void
  4194. qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
  4195. {
  4196. if (ha->exchoffld_buf) {
  4197. dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
  4198. ha->exchoffld_buf, ha->exchoffld_buf_dma);
  4199. ha->exchoffld_buf = NULL;
  4200. ha->exchoffld_size = 0;
  4201. }
  4202. }
  4203. /*
  4204. * qla2x00_free_fw_dump
  4205. * Frees fw dump stuff.
  4206. *
  4207. * Input:
  4208. * ha = adapter block pointer
  4209. */
  4210. static void
  4211. qla2x00_free_fw_dump(struct qla_hw_data *ha)
  4212. {
  4213. struct fwdt *fwdt = ha->fwdt;
  4214. uint j;
  4215. if (ha->fce)
  4216. dma_free_coherent(&ha->pdev->dev,
  4217. FCE_SIZE, ha->fce, ha->fce_dma);
  4218. if (ha->eft)
  4219. dma_free_coherent(&ha->pdev->dev,
  4220. EFT_SIZE, ha->eft, ha->eft_dma);
  4221. vfree(ha->fw_dump);
  4222. ha->fce = NULL;
  4223. ha->fce_dma = 0;
  4224. ha->flags.fce_enabled = 0;
  4225. ha->eft = NULL;
  4226. ha->eft_dma = 0;
  4227. ha->fw_dumped = false;
  4228. ha->fw_dump_cap_flags = 0;
  4229. ha->fw_dump_reading = 0;
  4230. ha->fw_dump = NULL;
  4231. ha->fw_dump_len = 0;
  4232. for (j = 0; j < 2; j++, fwdt++) {
  4233. vfree(fwdt->template);
  4234. fwdt->template = NULL;
  4235. fwdt->length = 0;
  4236. }
  4237. }
  4238. /*
  4239. * qla2x00_mem_free
  4240. * Frees all adapter allocated memory.
  4241. *
  4242. * Input:
  4243. * ha = adapter block pointer.
  4244. */
  4245. static void
  4246. qla2x00_mem_free(struct qla_hw_data *ha)
  4247. {
  4248. qla2x00_free_fw_dump(ha);
  4249. if (ha->mctp_dump)
  4250. dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
  4251. ha->mctp_dump_dma);
  4252. ha->mctp_dump = NULL;
  4253. mempool_destroy(ha->srb_mempool);
  4254. ha->srb_mempool = NULL;
  4255. if (ha->dcbx_tlv)
  4256. dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
  4257. ha->dcbx_tlv, ha->dcbx_tlv_dma);
  4258. ha->dcbx_tlv = NULL;
  4259. if (ha->xgmac_data)
  4260. dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
  4261. ha->xgmac_data, ha->xgmac_data_dma);
  4262. ha->xgmac_data = NULL;
  4263. if (ha->sns_cmd)
  4264. dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
  4265. ha->sns_cmd, ha->sns_cmd_dma);
  4266. ha->sns_cmd = NULL;
  4267. ha->sns_cmd_dma = 0;
  4268. if (ha->ct_sns)
  4269. dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
  4270. ha->ct_sns, ha->ct_sns_dma);
  4271. ha->ct_sns = NULL;
  4272. ha->ct_sns_dma = 0;
  4273. if (ha->sfp_data)
  4274. dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
  4275. ha->sfp_data_dma);
  4276. ha->sfp_data = NULL;
  4277. if (ha->flt)
  4278. dma_free_coherent(&ha->pdev->dev,
  4279. sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
  4280. ha->flt, ha->flt_dma);
  4281. ha->flt = NULL;
  4282. ha->flt_dma = 0;
  4283. if (ha->ms_iocb)
  4284. dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
  4285. ha->ms_iocb = NULL;
  4286. ha->ms_iocb_dma = 0;
  4287. if (ha->sf_init_cb)
  4288. dma_pool_free(ha->s_dma_pool,
  4289. ha->sf_init_cb, ha->sf_init_cb_dma);
  4290. if (ha->ex_init_cb)
  4291. dma_pool_free(ha->s_dma_pool,
  4292. ha->ex_init_cb, ha->ex_init_cb_dma);
  4293. ha->ex_init_cb = NULL;
  4294. ha->ex_init_cb_dma = 0;
  4295. if (ha->async_pd)
  4296. dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
  4297. ha->async_pd = NULL;
  4298. ha->async_pd_dma = 0;
  4299. dma_pool_destroy(ha->s_dma_pool);
  4300. ha->s_dma_pool = NULL;
  4301. if (ha->gid_list)
  4302. dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
  4303. ha->gid_list, ha->gid_list_dma);
  4304. ha->gid_list = NULL;
  4305. ha->gid_list_dma = 0;
  4306. if (IS_QLA82XX(ha)) {
  4307. if (!list_empty(&ha->gbl_dsd_list)) {
  4308. struct dsd_dma *dsd_ptr, *tdsd_ptr;
  4309. /* clean up allocated prev pool */
  4310. list_for_each_entry_safe(dsd_ptr,
  4311. tdsd_ptr, &ha->gbl_dsd_list, list) {
  4312. dma_pool_free(ha->dl_dma_pool,
  4313. dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
  4314. list_del(&dsd_ptr->list);
  4315. kfree(dsd_ptr);
  4316. }
  4317. }
  4318. }
  4319. dma_pool_destroy(ha->dl_dma_pool);
  4320. ha->dl_dma_pool = NULL;
  4321. dma_pool_destroy(ha->fcp_cmnd_dma_pool);
  4322. ha->fcp_cmnd_dma_pool = NULL;
  4323. mempool_destroy(ha->ctx_mempool);
  4324. ha->ctx_mempool = NULL;
  4325. if (ql2xenabledif && ha->dif_bundl_pool) {
  4326. struct dsd_dma *dsd, *nxt;
  4327. list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
  4328. list) {
  4329. list_del(&dsd->list);
  4330. dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
  4331. dsd->dsd_list_dma);
  4332. ha->dif_bundle_dma_allocs--;
  4333. kfree(dsd);
  4334. ha->dif_bundle_kallocs--;
  4335. ha->pool.unusable.count--;
  4336. }
  4337. list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
  4338. list_del(&dsd->list);
  4339. dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
  4340. dsd->dsd_list_dma);
  4341. ha->dif_bundle_dma_allocs--;
  4342. kfree(dsd);
  4343. ha->dif_bundle_kallocs--;
  4344. }
  4345. }
  4346. dma_pool_destroy(ha->dif_bundl_pool);
  4347. ha->dif_bundl_pool = NULL;
  4348. qlt_mem_free(ha);
  4349. qla_remove_hostmap(ha);
  4350. if (ha->init_cb)
  4351. dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
  4352. ha->init_cb, ha->init_cb_dma);
  4353. dma_pool_destroy(ha->purex_dma_pool);
  4354. ha->purex_dma_pool = NULL;
  4355. if (ha->elsrej.c) {
  4356. dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
  4357. ha->elsrej.c, ha->elsrej.cdma);
  4358. ha->elsrej.c = NULL;
  4359. }
  4360. ha->init_cb = NULL;
  4361. ha->init_cb_dma = 0;
  4362. vfree(ha->optrom_buffer);
  4363. ha->optrom_buffer = NULL;
  4364. kfree(ha->nvram);
  4365. ha->nvram = NULL;
  4366. kfree(ha->npiv_info);
  4367. ha->npiv_info = NULL;
  4368. kfree(ha->swl);
  4369. ha->swl = NULL;
  4370. kfree(ha->loop_id_map);
  4371. ha->sf_init_cb = NULL;
  4372. ha->sf_init_cb_dma = 0;
  4373. ha->loop_id_map = NULL;
  4374. }
  4375. struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
  4376. struct qla_hw_data *ha)
  4377. {
  4378. struct Scsi_Host *host;
  4379. struct scsi_qla_host *vha = NULL;
  4380. host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
  4381. if (!host) {
  4382. ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
  4383. "Failed to allocate host from the scsi layer, aborting.\n");
  4384. return NULL;
  4385. }
  4386. /* Clear our data area */
  4387. vha = shost_priv(host);
  4388. memset(vha, 0, sizeof(scsi_qla_host_t));
  4389. vha->host = host;
  4390. vha->host_no = host->host_no;
  4391. vha->hw = ha;
  4392. vha->qlini_mode = ql2x_ini_mode;
  4393. vha->ql2xexchoffld = ql2xexchoffld;
  4394. vha->ql2xiniexchg = ql2xiniexchg;
  4395. INIT_LIST_HEAD(&vha->vp_fcports);
  4396. INIT_LIST_HEAD(&vha->work_list);
  4397. INIT_LIST_HEAD(&vha->list);
  4398. INIT_LIST_HEAD(&vha->qla_cmd_list);
  4399. INIT_LIST_HEAD(&vha->logo_list);
  4400. INIT_LIST_HEAD(&vha->plogi_ack_list);
  4401. INIT_LIST_HEAD(&vha->qp_list);
  4402. INIT_LIST_HEAD(&vha->gnl.fcports);
  4403. INIT_LIST_HEAD(&vha->gpnid_list);
  4404. INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
  4405. INIT_LIST_HEAD(&vha->purex_list.head);
  4406. spin_lock_init(&vha->purex_list.lock);
  4407. spin_lock_init(&vha->work_lock);
  4408. spin_lock_init(&vha->cmd_list_lock);
  4409. init_waitqueue_head(&vha->fcport_waitQ);
  4410. init_waitqueue_head(&vha->vref_waitq);
  4411. qla_enode_init(vha);
  4412. qla_edb_init(vha);
  4413. vha->gnl.size = sizeof(struct get_name_list_extended) *
  4414. (ha->max_loop_id + 1);
  4415. vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
  4416. vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
  4417. if (!vha->gnl.l) {
  4418. ql_log(ql_log_fatal, vha, 0xd04a,
  4419. "Alloc failed for name list.\n");
  4420. scsi_host_put(vha->host);
  4421. return NULL;
  4422. }
  4423. /* todo: what about ext login? */
  4424. vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
  4425. vha->scan.l = vmalloc(vha->scan.size);
  4426. if (!vha->scan.l) {
  4427. ql_log(ql_log_fatal, vha, 0xd04a,
  4428. "Alloc failed for scan database.\n");
  4429. dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
  4430. vha->gnl.l, vha->gnl.ldma);
  4431. vha->gnl.l = NULL;
  4432. scsi_host_put(vha->host);
  4433. return NULL;
  4434. }
  4435. INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
  4436. snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu",
  4437. QLA2XXX_DRIVER_NAME, vha->host_no);
  4438. ql_dbg(ql_dbg_init, vha, 0x0041,
  4439. "Allocated the host=%p hw=%p vha=%p dev_name=%s",
  4440. vha->host, vha->hw, vha,
  4441. dev_name(&(ha->pdev->dev)));
  4442. return vha;
  4443. }
  4444. struct qla_work_evt *
  4445. qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
  4446. {
  4447. struct qla_work_evt *e;
  4448. if (test_bit(UNLOADING, &vha->dpc_flags))
  4449. return NULL;
  4450. if (qla_vha_mark_busy(vha))
  4451. return NULL;
  4452. e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
  4453. if (!e) {
  4454. QLA_VHA_MARK_NOT_BUSY(vha);
  4455. return NULL;
  4456. }
  4457. INIT_LIST_HEAD(&e->list);
  4458. e->type = type;
  4459. e->flags = QLA_EVT_FLAG_FREE;
  4460. return e;
  4461. }
  4462. int
  4463. qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
  4464. {
  4465. unsigned long flags;
  4466. bool q = false;
  4467. spin_lock_irqsave(&vha->work_lock, flags);
  4468. list_add_tail(&e->list, &vha->work_list);
  4469. if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
  4470. q = true;
  4471. spin_unlock_irqrestore(&vha->work_lock, flags);
  4472. if (q)
  4473. queue_work(vha->hw->wq, &vha->iocb_work);
  4474. return QLA_SUCCESS;
  4475. }
  4476. int
  4477. qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
  4478. u32 data)
  4479. {
  4480. struct qla_work_evt *e;
  4481. e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
  4482. if (!e)
  4483. return QLA_FUNCTION_FAILED;
  4484. e->u.aen.code = code;
  4485. e->u.aen.data = data;
  4486. return qla2x00_post_work(vha, e);
  4487. }
  4488. int
  4489. qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
  4490. {
  4491. struct qla_work_evt *e;
  4492. e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
  4493. if (!e)
  4494. return QLA_FUNCTION_FAILED;
  4495. memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
  4496. return qla2x00_post_work(vha, e);
  4497. }
  4498. #define qla2x00_post_async_work(name, type) \
  4499. int qla2x00_post_async_##name##_work( \
  4500. struct scsi_qla_host *vha, \
  4501. fc_port_t *fcport, uint16_t *data) \
  4502. { \
  4503. struct qla_work_evt *e; \
  4504. \
  4505. e = qla2x00_alloc_work(vha, type); \
  4506. if (!e) \
  4507. return QLA_FUNCTION_FAILED; \
  4508. \
  4509. e->u.logio.fcport = fcport; \
  4510. if (data) { \
  4511. e->u.logio.data[0] = data[0]; \
  4512. e->u.logio.data[1] = data[1]; \
  4513. } \
  4514. fcport->flags |= FCF_ASYNC_ACTIVE; \
  4515. return qla2x00_post_work(vha, e); \
  4516. }
  4517. qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
  4518. qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
  4519. qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
  4520. qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
  4521. qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
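/*
 * For reference, the "login" instantiation above expands to roughly the
 * following (sketch of the preprocessor output, not a separate copy in
 * the source):
 *
 *	int qla2x00_post_async_login_work(struct scsi_qla_host *vha,
 *	    fc_port_t *fcport, uint16_t *data)
 *	{
 *		struct qla_work_evt *e;
 *
 *		e = qla2x00_alloc_work(vha, QLA_EVT_ASYNC_LOGIN);
 *		if (!e)
 *			return QLA_FUNCTION_FAILED;
 *
 *		e->u.logio.fcport = fcport;
 *		if (data) {
 *			e->u.logio.data[0] = data[0];
 *			e->u.logio.data[1] = data[1];
 *		}
 *		fcport->flags |= FCF_ASYNC_ACTIVE;
 *		return qla2x00_post_work(vha, e);
 *	}
 */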
  4522. int
  4523. qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
  4524. {
  4525. struct qla_work_evt *e;
  4526. e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
  4527. if (!e)
  4528. return QLA_FUNCTION_FAILED;
  4529. e->u.uevent.code = code;
  4530. return qla2x00_post_work(vha, e);
  4531. }
  4532. static void
  4533. qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
  4534. {
  4535. char event_string[40];
  4536. char *envp[] = { event_string, NULL };
  4537. switch (code) {
  4538. case QLA_UEVENT_CODE_FW_DUMP:
  4539. snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
  4540. vha->host_no);
  4541. break;
  4542. default:
  4543. /* do nothing */
  4544. break;
  4545. }
  4546. kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
  4547. }
  4548. int
  4549. qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
  4550. uint32_t *data, int cnt)
  4551. {
  4552. struct qla_work_evt *e;
  4553. e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
  4554. if (!e)
  4555. return QLA_FUNCTION_FAILED;
  4556. e->u.aenfx.evtcode = evtcode;
  4557. e->u.aenfx.count = cnt;
  4558. memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
  4559. return qla2x00_post_work(vha, e);
  4560. }
  4561. void qla24xx_sched_upd_fcport(fc_port_t *fcport)
  4562. {
  4563. unsigned long flags;
  4564. if (IS_SW_RESV_ADDR(fcport->d_id))
  4565. return;
  4566. spin_lock_irqsave(&fcport->vha->work_lock, flags);
  4567. if (fcport->disc_state == DSC_UPD_FCPORT) {
  4568. spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
  4569. return;
  4570. }
  4571. fcport->jiffies_at_registration = jiffies;
  4572. fcport->sec_since_registration = 0;
  4573. fcport->next_disc_state = DSC_DELETED;
  4574. qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
  4575. spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
  4576. queue_work(system_unbound_wq, &fcport->reg_work);
  4577. }
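/*
 * The work_lock round trip above is a double-queue guard: if another
 * path already moved the fcport to DSC_UPD_FCPORT, an update is in
 * flight and the function returns without queueing reg_work a second
 * time.
 */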
  4578. static
  4579. void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
  4580. {
  4581. unsigned long flags;
  4582. fc_port_t *fcport = NULL, *tfcp;
  4583. struct qlt_plogi_ack_t *pla =
  4584. (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
  4585. uint8_t free_fcport = 0;
  4586. ql_dbg(ql_dbg_disc, vha, 0xffff,
  4587. "%s %d %8phC enter\n",
  4588. __func__, __LINE__, e->u.new_sess.port_name);
  4589. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  4590. fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
  4591. if (fcport) {
  4592. fcport->d_id = e->u.new_sess.id;
  4593. if (pla) {
  4594. fcport->fw_login_state = DSC_LS_PLOGI_PEND;
  4595. memcpy(fcport->node_name,
  4596. pla->iocb.u.isp24.u.plogi.node_name,
  4597. WWN_SIZE);
  4598. qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
  4599. /* we took an extra ref_count to prevent PLOGI ACK when
  4600. * fcport/sess has not been created.
  4601. */
  4602. pla->ref_count--;
  4603. }
  4604. } else {
  4605. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  4606. fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  4607. if (fcport) {
  4608. fcport->d_id = e->u.new_sess.id;
  4609. fcport->flags |= FCF_FABRIC_DEVICE;
  4610. fcport->fw_login_state = DSC_LS_PLOGI_PEND;
  4611. fcport->tgt_short_link_down_cnt = 0;
  4612. memcpy(fcport->port_name, e->u.new_sess.port_name,
  4613. WWN_SIZE);
  4614. fcport->fc4_type = e->u.new_sess.fc4_type;
  4615. if (NVME_PRIORITY(vha->hw, fcport))
  4616. fcport->do_prli_nvme = 1;
  4617. else
  4618. fcport->do_prli_nvme = 0;
  4619. if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
  4620. fcport->dm_login_expire = jiffies +
  4621. QLA_N2N_WAIT_TIME * HZ;
  4622. fcport->fc4_type = FS_FC4TYPE_FCP;
  4623. fcport->n2n_flag = 1;
  4624. if (vha->flags.nvme_enabled)
  4625. fcport->fc4_type |= FS_FC4TYPE_NVME;
  4626. }
  4627. } else {
  4628. ql_dbg(ql_dbg_disc, vha, 0xffff,
  4629. "%s %8phC mem alloc fail.\n",
  4630. __func__, e->u.new_sess.port_name);
  4631. if (pla) {
  4632. list_del(&pla->list);
  4633. kmem_cache_free(qla_tgt_plogi_cachep, pla);
  4634. }
  4635. return;
  4636. }
  4637. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  4638. /* search again to make sure no one else got ahead */
  4639. tfcp = qla2x00_find_fcport_by_wwpn(vha,
  4640. e->u.new_sess.port_name, 1);
  4641. if (tfcp) {
/* should rarely happen */
  4643. ql_dbg(ql_dbg_disc, vha, 0xffff,
  4644. "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
  4645. __func__, tfcp->port_name, tfcp->disc_state,
  4646. tfcp->fw_login_state);
  4647. free_fcport = 1;
  4648. } else {
  4649. list_add_tail(&fcport->list, &vha->vp_fcports);
  4650. }
  4651. if (pla) {
  4652. qlt_plogi_ack_link(vha, pla, fcport,
  4653. QLT_PLOGI_LINK_SAME_WWN);
  4654. pla->ref_count--;
  4655. }
  4656. }
  4657. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  4658. if (fcport) {
  4659. fcport->id_changed = 1;
  4660. fcport->scan_state = QLA_FCPORT_FOUND;
  4661. fcport->chip_reset = vha->hw->base_qpair->chip_reset;
  4662. memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
  4663. if (pla) {
  4664. if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
  4665. u16 wd3_lo;
  4666. fcport->fw_login_state = DSC_LS_PRLI_PEND;
  4667. fcport->local = 0;
  4668. fcport->loop_id =
  4669. le16_to_cpu(
  4670. pla->iocb.u.isp24.nport_handle);
  4672. wd3_lo =
  4673. le16_to_cpu(
  4674. pla->iocb.u.isp24.u.prli.wd3_lo);
  4675. if (wd3_lo & BIT_7)
  4676. fcport->conf_compl_supported = 1;
  4677. if ((wd3_lo & BIT_4) == 0)
  4678. fcport->port_type = FCT_INITIATOR;
  4679. else
  4680. fcport->port_type = FCT_TARGET;
  4681. }
  4682. qlt_plogi_ack_unref(vha, pla);
  4683. } else {
  4684. fc_port_t *dfcp = NULL;
  4685. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  4686. tfcp = qla2x00_find_fcport_by_nportid(vha,
  4687. &e->u.new_sess.id, 1);
  4688. if (tfcp && (tfcp != fcport)) {
  4689. /*
  4690. * We have a conflict fcport with same NportID.
  4691. */
  4692. ql_dbg(ql_dbg_disc, vha, 0xffff,
  4693. "%s %8phC found conflict b4 add. DS %d LS %d\n",
  4694. __func__, tfcp->port_name, tfcp->disc_state,
  4695. tfcp->fw_login_state);
  4696. switch (tfcp->disc_state) {
  4697. case DSC_DELETED:
  4698. break;
  4699. case DSC_DELETE_PEND:
  4700. fcport->login_pause = 1;
  4701. tfcp->conflict = fcport;
  4702. break;
  4703. default:
  4704. fcport->login_pause = 1;
  4705. tfcp->conflict = fcport;
  4706. dfcp = tfcp;
  4707. break;
  4708. }
  4709. }
  4710. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  4711. if (dfcp)
  4712. qlt_schedule_sess_for_deletion(tfcp);
  4713. if (N2N_TOPO(vha->hw)) {
  4714. fcport->flags &= ~FCF_FABRIC_DEVICE;
  4715. fcport->keep_nport_handle = 1;
  4716. if (vha->flags.nvme_enabled) {
  4717. fcport->fc4_type =
  4718. (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
  4719. fcport->n2n_flag = 1;
  4720. }
  4721. fcport->fw_login_state = 0;
  4722. schedule_delayed_work(&vha->scan.scan_work, 5);
  4723. } else {
  4724. qla24xx_fcport_handle_login(vha, fcport);
  4725. }
  4726. }
  4727. }
  4728. if (free_fcport) {
  4729. qla2x00_free_fcport(fcport);
  4730. if (pla) {
  4731. list_del(&pla->list);
  4732. kmem_cache_free(qla_tgt_plogi_cachep, pla);
  4733. }
  4734. }
  4735. }
  4736. static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
  4737. {
  4738. struct srb *sp = e->u.iosb.sp;
  4739. int rval;
  4740. rval = qla2x00_start_sp(sp);
  4741. if (rval != QLA_SUCCESS) {
  4742. ql_dbg(ql_dbg_disc, vha, 0x2043,
  4743. "%s: %s: Re-issue IOCB failed (%d).\n",
  4744. __func__, sp->name, rval);
  4745. qla24xx_sp_unmap(vha, sp);
  4746. }
  4747. }
  4748. void
  4749. qla2x00_do_work(struct scsi_qla_host *vha)
  4750. {
  4751. struct qla_work_evt *e, *tmp;
  4752. unsigned long flags;
  4753. LIST_HEAD(work);
  4754. int rc;
  4755. spin_lock_irqsave(&vha->work_lock, flags);
  4756. list_splice_init(&vha->work_list, &work);
  4757. spin_unlock_irqrestore(&vha->work_lock, flags);
  4758. list_for_each_entry_safe(e, tmp, &work, list) {
  4759. rc = QLA_SUCCESS;
  4760. switch (e->type) {
  4761. case QLA_EVT_AEN:
  4762. fc_host_post_event(vha->host, fc_get_event_number(),
  4763. e->u.aen.code, e->u.aen.data);
  4764. break;
  4765. case QLA_EVT_IDC_ACK:
  4766. qla81xx_idc_ack(vha, e->u.idc_ack.mb);
  4767. break;
  4768. case QLA_EVT_ASYNC_LOGIN:
  4769. qla2x00_async_login(vha, e->u.logio.fcport,
  4770. e->u.logio.data);
  4771. break;
  4772. case QLA_EVT_ASYNC_LOGOUT:
  4773. rc = qla2x00_async_logout(vha, e->u.logio.fcport);
  4774. break;
  4775. case QLA_EVT_ASYNC_ADISC:
  4776. qla2x00_async_adisc(vha, e->u.logio.fcport,
  4777. e->u.logio.data);
  4778. break;
  4779. case QLA_EVT_UEVENT:
  4780. qla2x00_uevent_emit(vha, e->u.uevent.code);
  4781. break;
  4782. case QLA_EVT_AENFX:
  4783. qlafx00_process_aen(vha, e);
  4784. break;
  4785. case QLA_EVT_GPNID:
  4786. qla24xx_async_gpnid(vha, &e->u.gpnid.id);
  4787. break;
  4788. case QLA_EVT_UNMAP:
  4789. qla24xx_sp_unmap(vha, e->u.iosb.sp);
  4790. break;
  4791. case QLA_EVT_RELOGIN:
  4792. qla2x00_relogin(vha);
  4793. break;
  4794. case QLA_EVT_NEW_SESS:
  4795. qla24xx_create_new_sess(vha, e);
  4796. break;
  4797. case QLA_EVT_GPDB:
  4798. qla24xx_async_gpdb(vha, e->u.fcport.fcport,
  4799. e->u.fcport.opt);
  4800. break;
  4801. case QLA_EVT_PRLI:
  4802. qla24xx_async_prli(vha, e->u.fcport.fcport);
  4803. break;
  4804. case QLA_EVT_GPSC:
  4805. qla24xx_async_gpsc(vha, e->u.fcport.fcport);
  4806. break;
  4807. case QLA_EVT_GNL:
  4808. qla24xx_async_gnl(vha, e->u.fcport.fcport);
  4809. break;
  4810. case QLA_EVT_NACK:
  4811. qla24xx_do_nack_work(vha, e);
  4812. break;
  4813. case QLA_EVT_ASYNC_PRLO:
  4814. rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
  4815. break;
  4816. case QLA_EVT_ASYNC_PRLO_DONE:
  4817. qla2x00_async_prlo_done(vha, e->u.logio.fcport,
  4818. e->u.logio.data);
  4819. break;
  4820. case QLA_EVT_GPNFT:
  4821. qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
  4822. e->u.gpnft.sp);
  4823. break;
  4824. case QLA_EVT_GPNFT_DONE:
  4825. qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
  4826. break;
  4827. case QLA_EVT_GNNFT_DONE:
  4828. qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
  4829. break;
  4830. case QLA_EVT_GNNID:
  4831. qla24xx_async_gnnid(vha, e->u.fcport.fcport);
  4832. break;
  4833. case QLA_EVT_GFPNID:
  4834. qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
  4835. break;
  4836. case QLA_EVT_SP_RETRY:
  4837. qla_sp_retry(vha, e);
  4838. break;
  4839. case QLA_EVT_IIDMA:
  4840. qla_do_iidma_work(vha, e->u.fcport.fcport);
  4841. break;
  4842. case QLA_EVT_ELS_PLOGI:
  4843. qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
  4844. e->u.fcport.fcport, false);
  4845. break;
  4846. case QLA_EVT_SA_REPLACE:
  4847. rc = qla24xx_issue_sa_replace_iocb(vha, e);
  4848. break;
  4849. }
  4850. if (rc == EAGAIN) {
  4851. /* put 'work' at head of 'vha->work_list' */
  4852. spin_lock_irqsave(&vha->work_lock, flags);
  4853. list_splice(&work, &vha->work_list);
  4854. spin_unlock_irqrestore(&vha->work_lock, flags);
  4855. break;
  4856. }
  4857. list_del_init(&e->list);
  4858. if (e->flags & QLA_EVT_FLAG_FREE)
  4859. kfree(e);
  4860. /* For each work completed decrement vha ref count */
  4861. QLA_VHA_MARK_NOT_BUSY(vha);
  4862. }
  4863. }
  4864. int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
  4865. {
  4866. struct qla_work_evt *e;
  4867. e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
  4868. if (!e) {
  4869. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  4870. return QLA_FUNCTION_FAILED;
  4871. }
  4872. return qla2x00_post_work(vha, e);
  4873. }
/*
 * Relogin all the fcports of a vport.
 * Context: dpc thread
 */
  4877. void qla2x00_relogin(struct scsi_qla_host *vha)
  4878. {
  4879. fc_port_t *fcport;
  4880. int status, relogin_needed = 0;
  4881. struct event_arg ea;
  4882. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  4883. /*
  4884. * If the port is not ONLINE then try to login
  4885. * to it if we haven't run out of retries.
  4886. */
  4887. if (atomic_read(&fcport->state) != FCS_ONLINE &&
  4888. fcport->login_retry) {
  4889. if (fcport->scan_state != QLA_FCPORT_FOUND ||
  4890. fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
  4891. fcport->disc_state == DSC_LOGIN_COMPLETE)
  4892. continue;
  4893. if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
  4894. fcport->disc_state == DSC_DELETE_PEND) {
  4895. relogin_needed = 1;
  4896. } else {
  4897. if (vha->hw->current_topology != ISP_CFG_NL) {
  4898. memset(&ea, 0, sizeof(ea));
  4899. ea.fcport = fcport;
  4900. qla24xx_handle_relogin_event(vha, &ea);
  4901. } else if (vha->hw->current_topology ==
  4902. ISP_CFG_NL &&
  4903. IS_QLA2XXX_MIDTYPE(vha->hw)) {
  4904. (void)qla24xx_fcport_handle_login(vha,
  4905. fcport);
  4906. } else if (vha->hw->current_topology ==
  4907. ISP_CFG_NL) {
  4908. fcport->login_retry--;
  4909. status =
  4910. qla2x00_local_device_login(vha,
  4911. fcport);
  4912. if (status == QLA_SUCCESS) {
  4913. fcport->old_loop_id =
  4914. fcport->loop_id;
  4915. ql_dbg(ql_dbg_disc, vha, 0x2003,
  4916. "Port login OK: logged in ID 0x%x.\n",
  4917. fcport->loop_id);
  4918. qla2x00_update_fcport
  4919. (vha, fcport);
  4920. } else if (status == 1) {
  4921. set_bit(RELOGIN_NEEDED,
  4922. &vha->dpc_flags);
  4923. /* retry the login again */
  4924. ql_dbg(ql_dbg_disc, vha, 0x2007,
  4925. "Retrying %d login again loop_id 0x%x.\n",
  4926. fcport->login_retry,
  4927. fcport->loop_id);
  4928. } else {
  4929. fcport->login_retry = 0;
  4930. }
  4931. if (fcport->login_retry == 0 &&
  4932. status != QLA_SUCCESS)
  4933. qla2x00_clear_loop_id(fcport);
  4934. }
  4935. }
  4936. }
  4937. if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
  4938. break;
  4939. }
  4940. if (relogin_needed)
  4941. set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
  4942. ql_dbg(ql_dbg_disc, vha, 0x400e,
  4943. "Relogin end.\n");
  4944. }
  4945. /* Schedule work on any of the dpc-workqueues */
  4946. void
  4947. qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
  4948. {
  4949. struct qla_hw_data *ha = base_vha->hw;
  4950. switch (work_code) {
  4951. case MBA_IDC_AEN: /* 0x8200 */
  4952. if (ha->dpc_lp_wq)
  4953. queue_work(ha->dpc_lp_wq, &ha->idc_aen);
  4954. break;
  4955. case QLA83XX_NIC_CORE_RESET: /* 0x1 */
  4956. if (!ha->flags.nic_core_reset_hdlr_active) {
  4957. if (ha->dpc_hp_wq)
  4958. queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
  4959. } else
  4960. ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
  4961. "NIC Core reset is already active. Skip "
  4962. "scheduling it again.\n");
  4963. break;
  4964. case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
  4965. if (ha->dpc_hp_wq)
  4966. queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
  4967. break;
  4968. case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
  4969. if (ha->dpc_hp_wq)
  4970. queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
  4971. break;
  4972. default:
  4973. ql_log(ql_log_warn, base_vha, 0xb05f,
  4974. "Unknown work-code=0x%x.\n", work_code);
  4975. }
  4976. return;
  4977. }
  4978. /* Work: Perform NIC Core Unrecoverable state handling */
  4979. void
  4980. qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
  4981. {
  4982. struct qla_hw_data *ha =
  4983. container_of(work, struct qla_hw_data, nic_core_unrecoverable);
  4984. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  4985. uint32_t dev_state = 0;
  4986. qla83xx_idc_lock(base_vha, 0);
  4987. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
  4988. qla83xx_reset_ownership(base_vha);
  4989. if (ha->flags.nic_core_reset_owner) {
  4990. ha->flags.nic_core_reset_owner = 0;
  4991. qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
  4992. QLA8XXX_DEV_FAILED);
  4993. ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
  4994. qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
  4995. }
  4996. qla83xx_idc_unlock(base_vha, 0);
  4997. }
  4998. /* Work: Execute IDC state handler */
  4999. void
  5000. qla83xx_idc_state_handler_work(struct work_struct *work)
  5001. {
  5002. struct qla_hw_data *ha =
  5003. container_of(work, struct qla_hw_data, idc_state_handler);
  5004. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  5005. uint32_t dev_state = 0;
  5006. qla83xx_idc_lock(base_vha, 0);
  5007. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
  5008. if (dev_state == QLA8XXX_DEV_FAILED ||
  5009. dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
  5010. qla83xx_idc_state_handler(base_vha);
  5011. qla83xx_idc_unlock(base_vha, 0);
  5012. }
  5013. static int
  5014. qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
  5015. {
  5016. int rval = QLA_SUCCESS;
  5017. unsigned long heart_beat_wait = jiffies + (1 * HZ);
  5018. uint32_t heart_beat_counter1, heart_beat_counter2;
  5019. do {
  5020. if (time_after(jiffies, heart_beat_wait)) {
  5021. ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
  5022. "Nic Core f/w is not alive.\n");
  5023. rval = QLA_FUNCTION_FAILED;
  5024. break;
  5025. }
  5026. qla83xx_idc_lock(base_vha, 0);
  5027. qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
  5028. &heart_beat_counter1);
  5029. qla83xx_idc_unlock(base_vha, 0);
  5030. msleep(100);
  5031. qla83xx_idc_lock(base_vha, 0);
  5032. qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
  5033. &heart_beat_counter2);
  5034. qla83xx_idc_unlock(base_vha, 0);
  5035. } while (heart_beat_counter1 == heart_beat_counter2);
  5036. return rval;
  5037. }
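/*
 * In other words: the NIC core firmware is considered alive as soon as
 * two QLA83XX_FW_HEARTBEAT samples taken ~100 ms apart differ; if the
 * counter has not advanced within roughly one second (heart_beat_wait),
 * QLA_FUNCTION_FAILED is returned.
 */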
  5038. /* Work: Perform NIC Core Reset handling */
  5039. void
  5040. qla83xx_nic_core_reset_work(struct work_struct *work)
  5041. {
  5042. struct qla_hw_data *ha =
  5043. container_of(work, struct qla_hw_data, nic_core_reset);
  5044. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  5045. uint32_t dev_state = 0;
  5046. if (IS_QLA2031(ha)) {
  5047. if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
  5048. ql_log(ql_log_warn, base_vha, 0xb081,
  5049. "Failed to dump mctp\n");
  5050. return;
  5051. }
  5052. if (!ha->flags.nic_core_reset_hdlr_active) {
  5053. if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
  5054. qla83xx_idc_lock(base_vha, 0);
  5055. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
  5056. &dev_state);
  5057. qla83xx_idc_unlock(base_vha, 0);
  5058. if (dev_state != QLA8XXX_DEV_NEED_RESET) {
  5059. ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
  5060. "Nic Core f/w is alive.\n");
  5061. return;
  5062. }
  5063. }
  5064. ha->flags.nic_core_reset_hdlr_active = 1;
  5065. if (qla83xx_nic_core_reset(base_vha)) {
  5066. /* NIC Core reset failed. */
  5067. ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
  5068. "NIC Core reset failed.\n");
  5069. }
  5070. ha->flags.nic_core_reset_hdlr_active = 0;
  5071. }
  5072. }
  5073. /* Work: Handle 8200 IDC aens */
  5074. void
  5075. qla83xx_service_idc_aen(struct work_struct *work)
  5076. {
  5077. struct qla_hw_data *ha =
  5078. container_of(work, struct qla_hw_data, idc_aen);
  5079. scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  5080. uint32_t dev_state, idc_control;
  5081. qla83xx_idc_lock(base_vha, 0);
  5082. qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
  5083. qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
  5084. qla83xx_idc_unlock(base_vha, 0);
  5085. if (dev_state == QLA8XXX_DEV_NEED_RESET) {
  5086. if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
  5087. ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
  5088. "Application requested NIC Core Reset.\n");
  5089. qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
  5090. } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
  5091. QLA_SUCCESS) {
  5092. ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
  5093. "Other protocol driver requested NIC Core Reset.\n");
  5094. qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
  5095. }
  5096. } else if (dev_state == QLA8XXX_DEV_FAILED ||
  5097. dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
  5098. qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
  5099. }
  5100. }
  5101. /*
  5102. * Control the frequency of IDC lock retries
  5103. */
  5104. #define QLA83XX_WAIT_LOGIC_MS 100
  5105. static int
  5106. qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
  5107. {
  5108. int rval;
  5109. uint32_t data;
  5110. uint32_t idc_lck_rcvry_stage_mask = 0x3;
  5111. uint32_t idc_lck_rcvry_owner_mask = 0x3c;
  5112. struct qla_hw_data *ha = base_vha->hw;
  5113. ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
  5114. "Trying force recovery of the IDC lock.\n");
  5115. rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
  5116. if (rval)
  5117. return rval;
  5118. if ((data & idc_lck_rcvry_stage_mask) > 0) {
  5119. return QLA_SUCCESS;
  5120. } else {
  5121. data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
  5122. rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
  5123. data);
  5124. if (rval)
  5125. return rval;
  5126. msleep(200);
  5127. rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
  5128. &data);
  5129. if (rval)
  5130. return rval;
  5131. if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
  5132. data &= (IDC_LOCK_RECOVERY_STAGE2 |
  5133. ~(idc_lck_rcvry_stage_mask));
  5134. rval = qla83xx_wr_reg(base_vha,
  5135. QLA83XX_IDC_LOCK_RECOVERY, data);
  5136. if (rval)
  5137. return rval;
  5138. /* Forcefully perform IDC UnLock */
  5139. rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
  5140. &data);
  5141. if (rval)
  5142. return rval;
  5143. /* Clear lock-id by setting 0xff */
  5144. rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
  5145. 0xff);
  5146. if (rval)
  5147. return rval;
  5148. /* Clear lock-recovery by setting 0x0 */
  5149. rval = qla83xx_wr_reg(base_vha,
  5150. QLA83XX_IDC_LOCK_RECOVERY, 0x0);
  5151. if (rval)
  5152. return rval;
  5153. } else
  5154. return QLA_SUCCESS;
  5155. }
  5156. return rval;
  5157. }
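/*
 * Register layout implied by the masks above (a reading of this
 * function, not a datasheet quote): bits [1:0] of
 * QLA83XX_IDC_LOCK_RECOVERY hold the recovery stage (mask 0x3) and
 * bits [5:2] hold the recovering function number (mask 0x3c). E.g. for
 * portnum 2 entering stage 1, data = IDC_LOCK_RECOVERY_STAGE1 |
 * (2 << 2), i.e. owner field 0x8 plus the stage bits.
 */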
  5158. static int
  5159. qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
  5160. {
  5161. int rval = QLA_SUCCESS;
  5162. uint32_t o_drv_lockid, n_drv_lockid;
  5163. unsigned long lock_recovery_timeout;
  5164. lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
  5165. retry_lockid:
  5166. rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
  5167. if (rval)
  5168. goto exit;
  5169. /* MAX wait time before forcing IDC Lock recovery = 2 secs */
  5170. if (time_after_eq(jiffies, lock_recovery_timeout)) {
  5171. if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
  5172. return QLA_SUCCESS;
  5173. else
  5174. return QLA_FUNCTION_FAILED;
  5175. }
  5176. rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
  5177. if (rval)
  5178. goto exit;
  5179. if (o_drv_lockid == n_drv_lockid) {
  5180. msleep(QLA83XX_WAIT_LOGIC_MS);
  5181. goto retry_lockid;
  5182. } else
  5183. return QLA_SUCCESS;
  5184. exit:
  5185. return rval;
  5186. }

/*
 * Context: task, can sleep
 */
void
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
	uint32_t data;
	uint32_t lock_owner;
	struct qla_hw_data *ha = base_vha->hw;

	might_sleep();

	/* IDC-lock implementation using driver-lock/lock-id remote registers */
retry_lock:
	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
	    == QLA_SUCCESS) {
		if (data) {
			/* Setting lock-id to our function-number */
			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
			    ha->portnum);
		} else {
			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
			    &lock_owner);
			ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
			    "Failed to acquire IDC lock, acquired by %d, "
			    "retrying...\n", lock_owner);

			/* Retry/Perform IDC-Lock recovery */
			if (qla83xx_idc_lock_recovery(base_vha)
			    == QLA_SUCCESS) {
				msleep(QLA83XX_WAIT_LOGIC_MS);
				goto retry_lock;
			} else
				ql_log(ql_log_warn, base_vha, 0xb075,
				    "IDC Lock recovery FAILED.\n");
		}
	}

	return;
}
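
/*
 * Typical IDC critical-section pattern used throughout this file
 * (a sketch of existing usage, not a new API):
 *
 *	qla83xx_idc_lock(base_vha, 0);
 *	... read/modify the shared IDC registers ...
 *	qla83xx_idc_unlock(base_vha, 0);
 */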

static bool
qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
    struct purex_entry_24xx *purex)
{
	char fwstr[16];
	u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
	struct port_database_24xx *pdb;

	/* Domain Controller is always logged-out. */
	/* if RDP request is not from Domain Controller: */
	if (sid != 0xfffc01)
		return false;

	ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);

	pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
	if (!pdb) {
		ql_dbg(ql_dbg_init, vha, 0x0181,
		    "%s: Failed allocate pdb\n", __func__);
	} else if (qla24xx_get_port_database(vha,
	    le16_to_cpu(purex->nport_handle), pdb)) {
		ql_dbg(ql_dbg_init, vha, 0x0181,
		    "%s: Failed get pdb sid=%x\n", __func__, sid);
	} else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
	    pdb->current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_init, vha, 0x0181,
		    "%s: Port not logged in sid=%#x\n", __func__, sid);
	} else {
		/* RDP request is from logged in port */
		kfree(pdb);
		return false;
	}
	kfree(pdb);

	vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
	fwstr[strcspn(fwstr, " ")] = 0;
	/* if FW version allows RDP response length up to 2048 bytes: */
	if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
		return false;

	ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);

	/* RDP response length is to be reduced to maximum 256 bytes */
	return true;
}

/*
 * Function Name: qla24xx_process_purex_rdp
 *
 * Description:
 * Prepare an RDP response and send it to the Fabric switch.
 *
 * PARAMETERS:
 * vha: SCSI qla host
 * item: purex_item wrapping the RDP request received by the HBA
 */
void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
			       struct purex_item *item)
{
	struct qla_hw_data *ha = vha->hw;
	struct purex_entry_24xx *purex =
	    (struct purex_entry_24xx *)&item->iocb;
	dma_addr_t rsp_els_dma;
	dma_addr_t rsp_payload_dma;
	dma_addr_t stat_dma;
	dma_addr_t sfp_dma;
	struct els_entry_24xx *rsp_els = NULL;
	struct rdp_rsp_payload *rsp_payload = NULL;
	struct link_statistics *stat = NULL;
	uint8_t *sfp = NULL;
	uint16_t sfp_flags = 0;
	uint rsp_payload_length = sizeof(*rsp_payload);
	int rval;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
	    "%s: Enter\n", __func__);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
	    "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
	    purex, sizeof(*purex));

	if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
		rsp_payload_length =
		    offsetof(typeof(*rsp_payload), optical_elmt_desc);
		ql_dbg(ql_dbg_init, vha, 0x0181,
		    "Reducing RSP payload length to %u bytes...\n",
		    rsp_payload_length);
	}

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
	    &rsp_els_dma, GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0183,
		    "Failed allocate dma buffer ELS RSP.\n");
		goto dealloc;
	}

	rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
	    &rsp_payload_dma, GFP_KERNEL);
	if (!rsp_payload) {
		ql_log(ql_log_warn, vha, 0x0184,
		    "Failed allocate dma buffer ELS RSP payload.\n");
		goto dealloc;
	}

	sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
	    &sfp_dma, GFP_KERNEL);

	stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
	    &stat_dma, GFP_KERNEL);

	/* Prepare Response IOCB */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->sys_define = 0;
	rsp_els->entry_status = 0;
	rsp_els->handle = 0;
	rsp_els->nport_handle = purex->nport_handle;
	rsp_els->tx_dsd_count = cpu_to_le16(1);
	rsp_els->vp_index = purex->vp_idx;
	rsp_els->sof_type = EST_SOFI3;
	rsp_els->rx_xchg_address = purex->rx_xchg_addr;
	rsp_els->rx_dsd_count = 0;
	rsp_els->opcode = purex->els_frame_payload[0];
	rsp_els->d_id[0] = purex->s_id[0];
	rsp_els->d_id[1] = purex->s_id[1];
	rsp_els->d_id[2] = purex->s_id[2];
	rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
	rsp_els->rx_byte_count = 0;
	rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
	put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
	rsp_els->tx_len = rsp_els->tx_byte_count;
	rsp_els->rx_address = 0;
	rsp_els->rx_len = 0;

	/* Prepare Response Payload */
	rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
	rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
	    sizeof(rsp_payload->hdr));

	/* Link service Request Info Descriptor */
	rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
	rsp_payload->ls_req_info_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
	rsp_payload->ls_req_info_desc.req_payload_word_0 =
	    cpu_to_be32p((uint32_t *)purex->els_frame_payload);

	/* Link service Request Info Descriptor 2 */
	rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
	rsp_payload->ls_req_info_desc2.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
	rsp_payload->ls_req_info_desc2.req_payload_word_0 =
	    cpu_to_be32p((uint32_t *)purex->els_frame_payload);

	rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
	rsp_payload->sfp_diag_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
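
	/*
	 * The qla2x00_read_sfp() calls below poll the transceiver over
	 * two-wire: device address 0xa0 is the SFP serial-ID page and
	 * 0xa2 the diagnostics page, as laid out in SFF-8472. Offset 0x60
	 * of the 0xa2 page holds the live big-endian measurements
	 * (temperature, Vcc, Tx bias, Tx power, Rx power) copied out below.
	 */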
	if (sfp) {
		/* SFP Flags */
		memset(sfp, 0, SFP_RTDI_LEN);
		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
		if (!rval) {
			/* SFP Flags bits 3-0: Port Tx Laser Type */
			if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
				sfp_flags |= BIT_0; /* short wave */
			else if (sfp[0] & BIT_1)
				sfp_flags |= BIT_1; /* long wave 1310nm */
			else if (sfp[1] & BIT_4)
				sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
		}

		/* SFP Type */
		memset(sfp, 0, SFP_RTDI_LEN);
		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
		if (!rval) {
			sfp_flags |= BIT_4; /* optical */
			if (sfp[0] == 0x3)
				sfp_flags |= BIT_6; /* sfp+ */
		}

		rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);

		/* SFP Diagnostics */
		memset(sfp, 0, SFP_RTDI_LEN);
		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
		if (!rval) {
			__be16 *trx = (__force __be16 *)sfp; /* already be16 */

			rsp_payload->sfp_diag_desc.temperature = trx[0];
			rsp_payload->sfp_diag_desc.vcc = trx[1];
			rsp_payload->sfp_diag_desc.tx_bias = trx[2];
			rsp_payload->sfp_diag_desc.tx_power = trx[3];
			rsp_payload->sfp_diag_desc.rx_power = trx[4];
		}
	}

	/* Port Speed Descriptor */
	rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
	rsp_payload->port_speed_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
	rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
	    qla25xx_fdmi_port_speed_capability(ha));
	rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
	    qla25xx_fdmi_port_speed_currently(ha));

	/* Link Error Status Descriptor */
	rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
	rsp_payload->ls_err_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
	if (stat) {
		rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
		if (!rval) {
			rsp_payload->ls_err_desc.link_fail_cnt =
			    cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
			rsp_payload->ls_err_desc.loss_sync_cnt =
			    cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
			rsp_payload->ls_err_desc.loss_sig_cnt =
			    cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
			rsp_payload->ls_err_desc.prim_seq_err_cnt =
			    cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
			rsp_payload->ls_err_desc.inval_xmit_word_cnt =
			    cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
			rsp_payload->ls_err_desc.inval_crc_cnt =
			    cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
			rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
		}
	}

	/* Portname Descriptor */
	rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
	rsp_payload->port_name_diag_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
	memcpy(rsp_payload->port_name_diag_desc.WWNN,
	    vha->node_name,
	    sizeof(rsp_payload->port_name_diag_desc.WWNN));
	memcpy(rsp_payload->port_name_diag_desc.WWPN,
	    vha->port_name,
	    sizeof(rsp_payload->port_name_diag_desc.WWPN));

	/* F-Port Portname Descriptor */
	rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
	rsp_payload->port_name_direct_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
	memcpy(rsp_payload->port_name_direct_desc.WWNN,
	    vha->fabric_node_name,
	    sizeof(rsp_payload->port_name_direct_desc.WWNN));
	memcpy(rsp_payload->port_name_direct_desc.WWPN,
	    vha->fabric_port_name,
	    sizeof(rsp_payload->port_name_direct_desc.WWPN));

	/* Buffer Credit Descriptor */
	rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
	rsp_payload->buffer_credit_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
	rsp_payload->buffer_credit_desc.fcport_b2b = 0;
	rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
	rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
	if (ha->flags.plogi_template_valid) {
		uint32_t tmp =
		    be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);

		rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
	}
  5458. if (rsp_payload_length < sizeof(*rsp_payload))
  5459. goto send;
  5460. /* Optical Element Descriptor, Temperature */
  5461. rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
  5462. rsp_payload->optical_elmt_desc[0].desc_len =
  5463. cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
  5464. /* Optical Element Descriptor, Voltage */
  5465. rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
  5466. rsp_payload->optical_elmt_desc[1].desc_len =
  5467. cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
  5468. /* Optical Element Descriptor, Tx Bias Current */
  5469. rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
  5470. rsp_payload->optical_elmt_desc[2].desc_len =
  5471. cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
  5472. /* Optical Element Descriptor, Tx Power */
  5473. rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
  5474. rsp_payload->optical_elmt_desc[3].desc_len =
  5475. cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
  5476. /* Optical Element Descriptor, Rx Power */
  5477. rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
  5478. rsp_payload->optical_elmt_desc[4].desc_len =
  5479. cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
	if (sfp) {
		memset(sfp, 0, SFP_RTDI_LEN);
		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
		if (!rval) {
			__be16 *trx = (__force __be16 *)sfp; /* already be16 */

			/* Optical Element Descriptor, Temperature */
			rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
			rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
			rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
			rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
			rsp_payload->optical_elmt_desc[0].element_flags =
			    cpu_to_be32(1 << 28);

			/* Optical Element Descriptor, Voltage */
			rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
			rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
			rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
			rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
			rsp_payload->optical_elmt_desc[1].element_flags =
			    cpu_to_be32(2 << 28);

			/* Optical Element Descriptor, Tx Bias Current */
			rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
			rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
			rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
			rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
			rsp_payload->optical_elmt_desc[2].element_flags =
			    cpu_to_be32(3 << 28);

			/* Optical Element Descriptor, Tx Power */
			rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
			rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
			rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
			rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
			rsp_payload->optical_elmt_desc[3].element_flags =
			    cpu_to_be32(4 << 28);

			/* Optical Element Descriptor, Rx Power */
			rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
			rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
			rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
			rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
			rsp_payload->optical_elmt_desc[4].element_flags =
			    cpu_to_be32(5 << 28);
		}
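
		/*
		 * Offset 112 of the 0xa2 page is the start of the live
		 * alarm/warning flag bytes in SFF-8472; after this read
		 * sfp[0]/sfp[1] carry the alarm bits and sfp[4]/sfp[5] the
		 * corresponding warning bits, which get packed into bits 3:0
		 * of each descriptor's element_flags.
		 */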
		memset(sfp, 0, SFP_RTDI_LEN);
		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
		if (!rval) {
			/* Temperature high/low alarm/warning */
			rsp_payload->optical_elmt_desc[0].element_flags |=
			    cpu_to_be32(
				(sfp[0] >> 7 & 1) << 3 |
				(sfp[0] >> 6 & 1) << 2 |
				(sfp[4] >> 7 & 1) << 1 |
				(sfp[4] >> 6 & 1) << 0);

			/* Voltage high/low alarm/warning */
			rsp_payload->optical_elmt_desc[1].element_flags |=
			    cpu_to_be32(
				(sfp[0] >> 5 & 1) << 3 |
				(sfp[0] >> 4 & 1) << 2 |
				(sfp[4] >> 5 & 1) << 1 |
				(sfp[4] >> 4 & 1) << 0);

			/* Tx Bias Current high/low alarm/warning */
			rsp_payload->optical_elmt_desc[2].element_flags |=
			    cpu_to_be32(
				(sfp[0] >> 3 & 1) << 3 |
				(sfp[0] >> 2 & 1) << 2 |
				(sfp[4] >> 3 & 1) << 1 |
				(sfp[4] >> 2 & 1) << 0);

			/* Tx Power high/low alarm/warning */
			rsp_payload->optical_elmt_desc[3].element_flags |=
			    cpu_to_be32(
				(sfp[0] >> 1 & 1) << 3 |
				(sfp[0] >> 0 & 1) << 2 |
				(sfp[4] >> 1 & 1) << 1 |
				(sfp[4] >> 0 & 1) << 0);

			/* Rx Power high/low alarm/warning */
			rsp_payload->optical_elmt_desc[4].element_flags |=
			    cpu_to_be32(
				(sfp[1] >> 7 & 1) << 3 |
				(sfp[1] >> 6 & 1) << 2 |
				(sfp[5] >> 7 & 1) << 1 |
				(sfp[5] >> 6 & 1) << 0);
		}
	}

	/* Optical Product Data Descriptor */
	rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
	rsp_payload->optical_prod_desc.desc_len =
	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
	if (sfp) {
		memset(sfp, 0, SFP_RTDI_LEN);
		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
		if (!rval) {
			memcpy(rsp_payload->optical_prod_desc.vendor_name,
			    sfp + 0,
			    sizeof(rsp_payload->optical_prod_desc.vendor_name));
			memcpy(rsp_payload->optical_prod_desc.part_number,
			    sfp + 20,
			    sizeof(rsp_payload->optical_prod_desc.part_number));
			memcpy(rsp_payload->optical_prod_desc.revision,
			    sfp + 36,
			    sizeof(rsp_payload->optical_prod_desc.revision));
			memcpy(rsp_payload->optical_prod_desc.serial_number,
			    sfp + 48,
			    sizeof(rsp_payload->optical_prod_desc.serial_number));
		}

		memset(sfp, 0, SFP_RTDI_LEN);
		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
		if (!rval) {
			memcpy(rsp_payload->optical_prod_desc.date,
			    sfp + 0,
			    sizeof(rsp_payload->optical_prod_desc.date));
		}
	}

send:
	ql_dbg(ql_dbg_init, vha, 0x0183,
	    "Sending ELS Response to RDP Request...\n");
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
	    rsp_els, sizeof(*rsp_els));
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
	    "-------- ELS RSP PAYLOAD -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
	    rsp_payload, rsp_payload_length);

	rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0188,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0189,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
	}

dealloc:
	if (stat)
		dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
		    stat, stat_dma);
	if (sfp)
		dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
		    sfp, sfp_dma);
	if (rsp_payload)
		dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
		    rsp_payload, rsp_payload_dma);
	if (rsp_els)
		dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
		    rsp_els, rsp_els_dma);
}

void
qla24xx_free_purex_item(struct purex_item *item)
{
	if (item == &item->vha->default_item)
		memset(&item->vha->default_item, 0, sizeof(struct purex_item));
	else
		kfree(item);
}

void qla24xx_process_purex_list(struct purex_list *list)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct purex_item *item, *next;
	ulong flags;

	spin_lock_irqsave(&list->lock, flags);
	list_splice_init(&list->head, &head);
	spin_unlock_irqrestore(&list->lock, flags);

	list_for_each_entry_safe(item, next, &head, list) {
		list_del(&item->list);
		item->process_item(item->vha, item);
		qla24xx_free_purex_item(item);
	}
}
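
/*
 * Note the splice pattern above: pending items are detached onto a private
 * list while holding list->lock, so each item's process_item() callback
 * then runs without the lock held and new PUREX IOCBs can keep queueing
 * concurrently.
 */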

/*
 * Context: task, can sleep
 */
void
qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
#if 0
	uint16_t options = (requester_id << 15) | BIT_7;
#endif
	uint16_t retry;
	uint32_t data;
	struct qla_hw_data *ha = base_vha->hw;

	might_sleep();

	/* IDC-unlock implementation using driver-unlock/lock-id
	 * remote registers
	 */
	retry = 0;
retry_unlock:
	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
	    == QLA_SUCCESS) {
		if (data == ha->portnum) {
			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
			/* Clearing lock-id by setting 0xff */
			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
		} else if (retry < 10) {
			/* SV: XXX: IDC unlock retrying needed here? */
			/* Retry for IDC-unlock */
			msleep(QLA83XX_WAIT_LOGIC_MS);
			retry++;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
			    "Failed to release IDC lock, retrying=%d\n", retry);
			goto retry_unlock;
		}
	} else if (retry < 10) {
		/* Retry for IDC-unlock */
		msleep(QLA83XX_WAIT_LOGIC_MS);
		retry++;
		ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
		    "Failed to read drv-lockid, retrying=%d\n", retry);
		goto retry_unlock;
	}

	return;

#if 0
	/* XXX: IDC-unlock implementation using access-control mbx */
	retry = 0;
retry_unlock2:
	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
		if (retry < 10) {
			/* Retry for IDC-unlock */
			msleep(QLA83XX_WAIT_LOGIC_MS);
			retry++;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
			    "Failed to release IDC lock, retrying=%d\n", retry);
			goto retry_unlock2;
		}
	}

	return;
#endif
}

int
__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (rval == QLA_SUCCESS) {
		drv_presence |= (1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
		    drv_presence);
	}

	return rval;
}

int
qla83xx_set_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;

	qla83xx_idc_lock(vha, 0);
	rval = __qla83xx_set_drv_presence(vha);
	qla83xx_idc_unlock(vha, 0);

	return rval;
}

int
__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_presence;

	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
	if (rval == QLA_SUCCESS) {
		drv_presence &= ~(1 << ha->portnum);
		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
		    drv_presence);
	}

	return rval;
}

int
qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;

	qla83xx_idc_lock(vha, 0);
	rval = __qla83xx_clear_drv_presence(vha);
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
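
/*
 * DRV_PRESENCE is a shared bitmap with one bit per function (bit index ==
 * ha->portnum). The double-underscore variants above assume the caller
 * already holds the IDC lock; the plain wrappers take and drop it around
 * the register update.
 */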

static void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t drv_ack, drv_presence;
	unsigned long ack_timeout;

	/* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
	ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
	while (1) {
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
		if ((drv_ack & drv_presence) == drv_presence)
			break;

		if (time_after_eq(jiffies, ack_timeout)) {
			ql_log(ql_log_warn, vha, 0xb067,
			    "RESET ACK TIMEOUT! drv_presence=0x%x "
			    "drv_ack=0x%x\n", drv_presence, drv_ack);
			/*
			 * The function(s) which did not ack in time are
			 * forced to withdraw any further participation in
			 * the IDC reset.
			 */
			if (drv_ack != drv_presence)
				qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
				    drv_ack);
			break;
		}

		qla83xx_idc_unlock(vha, 0);
		msleep(1000);
		qla83xx_idc_lock(vha, 0);
	}

	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
	ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}

static int
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t idc_control;

	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
	ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");

	/* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
	__qla83xx_get_idc_control(vha, &idc_control);
	idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
	__qla83xx_set_idc_control(vha, idc_control);

	qla83xx_idc_unlock(vha, 0);
	rval = qla83xx_restart_nic_firmware(vha);
	qla83xx_idc_lock(vha, 0);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0xb06a,
		    "Failed to restart NIC f/w.\n");
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
		ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
	} else {
		ql_dbg(ql_dbg_p3p, vha, 0xb06c,
		    "Success in restarting nic f/w.\n");
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
		ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
	}

	return rval;
}

/* Assumes idc_lock always held on entry */
int
qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
{
	struct qla_hw_data *ha = base_vha->hw;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;
	uint32_t dev_state;

	/* Wait for MAX-INIT-TIMEOUT for the device to go ready */
	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);

	while (1) {
		if (time_after_eq(jiffies, dev_init_timeout)) {
			ql_log(ql_log_warn, base_vha, 0xb06e,
			    "Initialization TIMEOUT!\n");
			/* Init timeout. Disable further NIC Core
			 * communication.
			 */
			qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_FAILED);
			ql_log(ql_log_info, base_vha, 0xb06f,
			    "HW State: FAILED.\n");
		}

		qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		switch (dev_state) {
		case QLA8XXX_DEV_READY:
			if (ha->flags.nic_core_reset_owner)
				qla83xx_idc_audit(base_vha,
				    IDC_AUDIT_COMPLETION);
			ha->flags.nic_core_reset_owner = 0;
			ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
			    "Reset_owner reset by 0x%x.\n",
			    ha->portnum);
			goto exit;
		case QLA8XXX_DEV_COLD:
			if (ha->flags.nic_core_reset_owner)
				rval = qla83xx_device_bootstrap(base_vha);
			else {
				/* Wait for AEN to change device-state */
				qla83xx_idc_unlock(base_vha, 0);
				msleep(1000);
				qla83xx_idc_lock(base_vha, 0);
			}
			break;
		case QLA8XXX_DEV_INITIALIZING:
			/* Wait for AEN to change device-state */
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		case QLA8XXX_DEV_NEED_RESET:
			if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
				qla83xx_need_reset_handler(base_vha);
			else {
				/* Wait for AEN to change device-state */
				qla83xx_idc_unlock(base_vha, 0);
				msleep(1000);
				qla83xx_idc_lock(base_vha, 0);
			}
			/* reset timeout value after need reset handler */
			dev_init_timeout = jiffies +
			    (ha->fcoe_dev_init_timeout * HZ);
			break;
		case QLA8XXX_DEV_NEED_QUIESCENT:
			/* XXX: DEBUG for now */
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		case QLA8XXX_DEV_QUIESCENT:
			/* XXX: DEBUG for now */
			if (ha->flags.quiesce_owner)
				goto exit;

			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			dev_init_timeout = jiffies +
			    (ha->fcoe_dev_init_timeout * HZ);
			break;
		case QLA8XXX_DEV_FAILED:
			if (ha->flags.nic_core_reset_owner)
				qla83xx_idc_audit(base_vha,
				    IDC_AUDIT_COMPLETION);
			ha->flags.nic_core_reset_owner = 0;
			__qla83xx_clear_drv_presence(base_vha);
			qla83xx_idc_unlock(base_vha, 0);
			qla8xxx_dev_failed_handler(base_vha);
			rval = QLA_FUNCTION_FAILED;
			qla83xx_idc_lock(base_vha, 0);
			goto exit;
		case QLA8XXX_BAD_VALUE:
			qla83xx_idc_unlock(base_vha, 0);
			msleep(1000);
			qla83xx_idc_lock(base_vha, 0);
			break;
		default:
			ql_log(ql_log_warn, base_vha, 0xb071,
			    "Unknown Device State: %x.\n", dev_state);
			qla83xx_idc_unlock(base_vha, 0);
			qla8xxx_dev_failed_handler(base_vha);
			rval = QLA_FUNCTION_FAILED;
			qla83xx_idc_lock(base_vha, 0);
			goto exit;
		}
	}

exit:
	return rval;
}

void
qla2x00_disable_board_on_pci_error(struct work_struct *work)
{
	struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
	    board_disable);
	struct pci_dev *pdev = ha->pdev;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_log(ql_log_warn, base_vha, 0x015b,
	    "Disabling adapter.\n");

	if (!atomic_read(&pdev->enable_cnt)) {
		ql_log(ql_log_info, base_vha, 0xfffc,
		    "PCI device disabled, no action req for PCI error=%lx\n",
		    base_vha->pci_flags);
		return;
	}

	/*
	 * if UNLOADING flag is already set, then continue unload,
	 * where it was set first.
	 */
	if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
		return;

	qla2x00_wait_for_sess_deletion(base_vha);

	qla2x00_delete_all_vps(ha, base_vha);

	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	qla2x00_dfs_remove(base_vha);

	qla84xx_put_chip(base_vha);

	if (base_vha->timer_active)
		qla2x00_stop_timer(base_vha);

	base_vha->flags.online = 0;

	qla2x00_destroy_deferred_work(ha);

	/*
	 * Do not try to stop beacon blink as it will issue a mailbox
	 * command.
	 */
	qla2x00_free_sysfs_attr(base_vha, false);

	fc_remove_host(base_vha->host);

	scsi_remove_host(base_vha->host);

	base_vha->flags.init_done = 0;
	qla25xx_delete_queues(base_vha);
	qla2x00_free_fcports(base_vha);
	qla2x00_free_irqs(base_vha);
	qla2x00_mem_free(ha);
	qla82xx_md_free(base_vha);
	qla2x00_free_queues(ha);
	qla2x00_unmap_iobases(ha);

	pci_release_selected_regions(ha->pdev, ha->bars);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	/*
	 * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
	 */
}

/**************************************************************************
* qla2x00_do_dpc
*   This kernel thread is a task that is scheduled by the interrupt handler
*   to perform the background processing for interrupts.
*
* Notes:
* This task always runs in the context of a kernel thread.  It
* is kicked off by the driver's detect code and starts up
* one per adapter.  It immediately goes to sleep and waits for
* some fibre event.  When either the interrupt handler or
* the timer routine detects an event, it sets one of the task
* bits and then wakes us up.
**************************************************************************/
static int
qla2x00_do_dpc(void *data)
{
	scsi_qla_host_t *base_vha;
	struct qla_hw_data *ha;
	uint32_t online;
	struct qla_qpair *qpair;

	ha = (struct qla_hw_data *)data;
	base_vha = pci_get_drvdata(ha->pdev);

	set_user_nice(current, MIN_NICE);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
		    "DPC handler sleeping.\n");

		schedule();

		if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
			qla_pci_set_eeh_busy(base_vha);

		if (!base_vha->flags.init_done || ha->flags.mbox_busy)
			goto end_loop;

		if (ha->flags.eeh_busy) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
			    "eeh_busy=%d.\n", ha->flags.eeh_busy);
			goto end_loop;
		}

		ha->dpc_active = 1;

		ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
		    "DPC handler waking up, dpc_flags=0x%lx.\n",
		    base_vha->dpc_flags);

		if (test_bit(UNLOADING, &base_vha->dpc_flags))
			break;

		if (IS_P3P_TYPE(ha)) {
			if (IS_QLA8044(ha)) {
				if (test_and_clear_bit(ISP_UNRECOVERABLE,
				    &base_vha->dpc_flags)) {
					qla8044_idc_lock(ha);
					qla8044_wr_direct(base_vha,
					    QLA8044_CRB_DEV_STATE_INDEX,
					    QLA8XXX_DEV_FAILED);
					qla8044_idc_unlock(ha);
					ql_log(ql_log_info, base_vha, 0x4004,
					    "HW State: FAILED.\n");
					qla8044_device_state_handler(base_vha);
					continue;
				}
			} else {
				if (test_and_clear_bit(ISP_UNRECOVERABLE,
				    &base_vha->dpc_flags)) {
					qla82xx_idc_lock(ha);
					qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
					qla82xx_idc_unlock(ha);
					ql_log(ql_log_info, base_vha, 0x0151,
					    "HW State: FAILED.\n");
					qla82xx_device_state_handler(base_vha);
					continue;
				}
			}

			if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
			    &base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
				    "FCoE context reset scheduled.\n");
				if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
				    &base_vha->dpc_flags))) {
					if (qla82xx_fcoe_ctx_reset(base_vha)) {
						/* FCoE-ctx reset failed.
						 * Escalate to chip-reset
						 */
						set_bit(ISP_ABORT_NEEDED,
						    &base_vha->dpc_flags);
					}
					clear_bit(ABORT_ISP_ACTIVE,
					    &base_vha->dpc_flags);
				}

				ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
				    "FCoE context reset end.\n");
			}
		} else if (IS_QLAFX00(ha)) {
			if (test_and_clear_bit(ISP_UNRECOVERABLE,
			    &base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
				    "Firmware Reset Recovery\n");
				if (qlafx00_reset_initialize(base_vha)) {
					/* Failed. Abort isp later. */
					if (!test_bit(UNLOADING,
					    &base_vha->dpc_flags)) {
						set_bit(ISP_UNRECOVERABLE,
						    &base_vha->dpc_flags);
						ql_dbg(ql_dbg_dpc, base_vha,
						    0x4021,
						    "Reset Recovery Failed\n");
					}
				}
			}

			if (test_and_clear_bit(FX00_TARGET_SCAN,
			    &base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
				    "ISPFx00 Target Scan scheduled\n");
				if (qlafx00_rescan_isp(base_vha)) {
					if (!test_bit(UNLOADING,
					    &base_vha->dpc_flags))
						set_bit(ISP_UNRECOVERABLE,
						    &base_vha->dpc_flags);
					ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
					    "ISPFx00 Target Scan Failed\n");
				}
				ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
				    "ISPFx00 Target Scan End\n");
			}
			if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
			    &base_vha->dpc_flags)) {
				ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
				    "ISPFx00 Host Info resend scheduled\n");
				qlafx00_fx_disc(base_vha,
				    &base_vha->hw->mr.fcport,
				    FXDISC_REG_HOST_INFO);
			}
		}

		if (test_and_clear_bit(DETECT_SFP_CHANGE,
		    &base_vha->dpc_flags)) {
			/* Semantic:
			 *  - NO-OP -- await next ISP-ABORT. Preferred method
			 *             to minimize disruptions that will occur
			 *             when a forced chip-reset occurs.
			 *  - Force -- ISP-ABORT scheduled.
			 */
			/* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
		}

		if (test_and_clear_bit(ISP_ABORT_NEEDED,
		    &base_vha->dpc_flags) &&
		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
			bool do_reset = true;

			switch (base_vha->qlini_mode) {
			case QLA2XXX_INI_MODE_ENABLED:
				break;
			case QLA2XXX_INI_MODE_DISABLED:
				if (!qla_tgt_mode_enabled(base_vha) &&
				    !ha->flags.fw_started)
					do_reset = false;
				break;
			case QLA2XXX_INI_MODE_DUAL:
				if (!qla_dual_mode_enabled(base_vha) &&
				    !ha->flags.fw_started)
					do_reset = false;
				break;
			default:
				break;
			}

			if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
			    &base_vha->dpc_flags))) {
				base_vha->flags.online = 1;
				ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
				    "ISP abort scheduled.\n");
				if (ha->isp_ops->abort_isp(base_vha)) {
					/* failed. retry later */
					set_bit(ISP_ABORT_NEEDED,
					    &base_vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE,
				    &base_vha->dpc_flags);
				ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
				    "ISP abort end.\n");
			}
		}

		if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
			if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
				qla24xx_process_purex_list(
				    &base_vha->purex_list);
				clear_bit(PROCESS_PUREX_IOCB,
				    &base_vha->dpc_flags);
			}
		}

		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
		    &base_vha->dpc_flags)) {
			qla2x00_update_fcports(base_vha);
		}

		if (IS_QLAFX00(ha))
			goto loop_resync_check;

		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
			    "Quiescence mode scheduled.\n");
			if (IS_P3P_TYPE(ha)) {
				if (IS_QLA82XX(ha))
					qla82xx_device_state_handler(base_vha);
				if (IS_QLA8044(ha))
					qla8044_device_state_handler(base_vha);
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				if (!ha->flags.quiesce_owner) {
					qla2x00_perform_loop_resync(base_vha);
					if (IS_QLA82XX(ha)) {
						qla82xx_idc_lock(ha);
						qla82xx_clear_qsnt_ready(
						    base_vha);
						qla82xx_idc_unlock(ha);
					} else if (IS_QLA8044(ha)) {
						qla8044_idc_lock(ha);
						qla8044_clear_qsnt_ready(
						    base_vha);
						qla8044_idc_unlock(ha);
					}
				}
			} else {
				clear_bit(ISP_QUIESCE_NEEDED,
				    &base_vha->dpc_flags);
				qla2x00_quiesce_io(base_vha);
			}
			ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
			    "Quiescence mode end.\n");
		}

		if (test_and_clear_bit(RESET_MARKER_NEEDED,
		    &base_vha->dpc_flags) &&
		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
			ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
			    "Reset marker scheduled.\n");
			qla2x00_rst_aen(base_vha);
			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
			    "Reset marker end.\n");
		}

		/* Retry each device up to login retry count */
		if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {

			if (!base_vha->relogin_jif ||
			    time_after_eq(jiffies, base_vha->relogin_jif)) {
				base_vha->relogin_jif = jiffies + HZ;

				clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
				ql_dbg(ql_dbg_disc, base_vha, 0x400d,
				    "Relogin scheduled.\n");
				qla24xx_post_relogin_work(base_vha);
			}
		}

loop_resync_check:
		if (!qla2x00_reset_active(base_vha) &&
		    test_and_clear_bit(LOOP_RESYNC_NEEDED,
		    &base_vha->dpc_flags)) {
			/*
			 * Allow abort_isp to complete before moving on to
			 * scanning.
			 */
			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
			    "Loop resync scheduled.\n");

			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
			    &base_vha->dpc_flags))) {
				qla2x00_loop_resync(base_vha);

				clear_bit(LOOP_RESYNC_ACTIVE,
				    &base_vha->dpc_flags);
			}

			ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
			    "Loop resync end.\n");
		}

		if (IS_QLAFX00(ha))
			goto intr_on_check;

		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
			qla2xxx_flash_npiv_conf(base_vha);
		}

intr_on_check:
		if (!ha->interrupts_on)
			ha->isp_ops->enable_intrs(ha);

		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
		    &base_vha->dpc_flags)) {
			if (ha->beacon_blink_led == 1)
				ha->isp_ops->beacon_blink(base_vha);
		}

		/* qpair online check */
		if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
		    &base_vha->dpc_flags)) {
			if (ha->flags.eeh_busy ||
			    ha->flags.pci_channel_io_perm_failure)
				online = 0;
			else
				online = 1;

			mutex_lock(&ha->mq_lock);
			list_for_each_entry(qpair, &base_vha->qp_list,
			    qp_list_elem)
				qpair->online = online;
			mutex_unlock(&ha->mq_lock);
		}

		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
		    &base_vha->dpc_flags)) {
			u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;

			if (threshold > ha->orig_fw_xcb_count)
				threshold = ha->orig_fw_xcb_count;

			ql_log(ql_log_info, base_vha, 0xffffff,
			    "SET ZIO Activity exchange threshold to %d.\n",
			    threshold);
			if (qla27xx_set_zio_threshold(base_vha, threshold)) {
				ql_log(ql_log_info, base_vha, 0xffffff,
				    "Unable to SET ZIO Activity exchange threshold to %d.\n",
				    threshold);
			}
		}

		if (!IS_QLAFX00(ha))
			qla2x00_do_dpc_all_vps(base_vha);

		if (test_and_clear_bit(N2N_LINK_RESET,
		    &base_vha->dpc_flags)) {
			qla2x00_lip_reset(base_vha);
		}

		ha->dpc_active = 0;
end_loop:
		set_current_state(TASK_INTERRUPTIBLE);
	} /* End of while(1) */
	__set_current_state(TASK_RUNNING);

	ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
	    "DPC handler exiting.\n");

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	ha->dpc_active = 0;

	/* Cleanup any residual CTX SRBs. */
	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);

	return 0;
}

void
qla2xxx_wake_dpc(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct task_struct *t = ha->dpc_thread;

	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
		wake_up_process(t);
}
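
/*
 * Producers (the timer, interrupt handlers, sysfs hooks) typically request
 * DPC work with a pattern like the sketch below, which the loop above then
 * picks up on its next wakeup:
 *
 *	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 *	qla2xxx_wake_dpc(vha);
 */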

/*
 * qla2x00_rst_aen
 *	Processes asynchronous reset.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla2x00_rst_aen(scsi_qla_host_t *vha)
{
	if (vha->flags.online && !vha->flags.reset_active &&
	    !atomic_read(&vha->loop_down_timer) &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
		do {
			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

			/*
			 * Issue marker command only when we are going to
			 * start the I/O.
			 */
			vha->marker_needed = 1;
		} while (!atomic_read(&vha->loop_down_timer) &&
		    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
	}
}
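
/*
 * Heartbeat detection: a queue pair is considered stalled when its
 * completion count has not moved since the previous check while commands
 * are still outstanding with firmware; in that case a heartbeat work item
 * is queued to verify that firmware is still alive.
 */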
static bool qla_do_heartbeat(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	u32 cmpl_cnt;
	u16 i;
	bool do_heartbeat = false;

	/*
	 * Allow do_heartbeat only if we don't have any active interrupts,
	 * but there are still IOs outstanding with firmware.
	 */
	cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
	if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
	    cmpl_cnt != ha->base_qpair->cmd_cnt) {
		do_heartbeat = true;
		goto skip;
	}
	ha->base_qpair->prev_completion_cnt = cmpl_cnt;

	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i]) {
			cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
			if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
			    cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
				do_heartbeat = true;
				break;
			}
			ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
		}
	}

skip:
	return do_heartbeat;
}

static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx)
		return;

	if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
		return;

	/*
	 * dpc thread cannot run if heartbeat is running at the same time.
	 * We also do not want to starve heartbeat task. Therefore, do
	 * heartbeat task at least once every 5 seconds.
	 */
	if (dpc_started &&
	    time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
		return;

	if (qla_do_heartbeat(vha)) {
		ha->last_heartbeat_run_jiffies = jiffies;
		queue_work(ha->wq, &ha->heartbeat_work);
	}
}

static void qla_wind_down_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!ha->flags.eeh_busy)
		return;
	if (ha->pci_error_state)
		/* system is trying to recover */
		return;

	/*
	 * Current system is not handling PCIE error.  At this point, this is
	 * best effort to wind down the adapter.
	 */
	if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
	    !ha->flags.eeh_flush) {
		ql_log(ql_log_info, vha, 0x9009,
		    "PCI Error detected, attempting to reset hardware.\n");

		ha->isp_ops->reset_chip(vha);
		ha->isp_ops->disable_intrs(ha);

		ha->flags.eeh_flush = EEH_FLUSH_RDY;
		ha->eeh_jif = jiffies;

	} else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
	    time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) {
		pci_clear_master(ha->pdev);

		/* flush all command */
		qla2x00_abort_isp_cleanup(vha);
		ha->flags.eeh_flush = EEH_FLUSH_DONE;

		ql_log(ql_log_info, vha, 0x900a,
		    "PCI Error handling complete, all IOs aborted.\n");
	}
}

/**************************************************************************
*   qla2x00_timer
*
* Description:
*   One second timer
*
* Context: Interrupt
***************************************************************************/
void
qla2x00_timer(struct timer_list *t)
{
	scsi_qla_host_t *vha = from_timer(vha, t, timer);
	unsigned long cpu_flags = 0;
	int start_dpc = 0;
	int index;
	srb_t *sp;
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	if (ha->flags.eeh_busy) {
		qla_wind_down_chip(vha);

		ql_dbg(ql_dbg_timer, vha, 0x6000,
		    "EEH = %d, restarting timer.\n",
		    ha->flags.eeh_busy);
		qla2x00_restart_timer(vha, WATCH_INTERVAL);
		return;
	}

	/*
	 * Hardware read to raise pending EEH errors during mailbox waits. If
	 * the read returns -1 then disable the board.
	 */
	if (!pci_channel_offline(ha->pdev)) {
		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
		qla2x00_check_reg16_for_disconnect(vha, w);
	}

	/* Make sure qla82xx_watchdog is run only for physical port */
	if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
		if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
			start_dpc++;
		if (IS_QLA82XX(ha))
			qla82xx_watchdog(vha);
		else if (IS_QLA8044(ha))
			qla8044_watchdog(vha);
	}

	if (!vha->vp_idx && IS_QLAFX00(ha))
		qlafx00_timer_routine(vha);

	if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
		vha->link_down_time++;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
			fcport->tgt_link_down_time++;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	/* Loop down handler. */
	if (atomic_read(&vha->loop_down_timer) > 0 &&
	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
	    !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags)) &&
	    vha->flags.online) {

		if (atomic_read(&vha->loop_down_timer) ==
		    vha->loop_down_abort_time) {

			ql_log(ql_log_info, vha, 0x6008,
			    "Loop down - aborting the queues before time expires.\n");

			if (!IS_QLA2100(ha) && vha->link_down_timeout)
				atomic_set(&vha->loop_state, LOOP_DEAD);

			/*
			 * Schedule an ISP abort to return any FCP2-device
			 * commands.
			 */
			/* NPIV - scan physical port only */
			if (!vha->vp_idx) {
				spin_lock_irqsave(&ha->hardware_lock,
				    cpu_flags);
				req = ha->req_q_map[0];
				for (index = 1;
				    index < req->num_outstanding_cmds;
				    index++) {
					fc_port_t *sfcp;

					sp = req->outstanding_cmds[index];
					if (!sp)
						continue;
					if (sp->cmd_type != TYPE_SRB)
						continue;
					if (sp->type != SRB_SCSI_CMD)
						continue;
					sfcp = sp->fcport;
					if (!(sfcp->flags & FCF_FCP2_DEVICE))
						continue;

					if (IS_QLA82XX(ha))
						set_bit(FCOE_CTX_RESET_NEEDED,
						    &vha->dpc_flags);
					else
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
					break;
				}
				spin_unlock_irqrestore(&ha->hardware_lock,
				    cpu_flags);
			}
			start_dpc++;
		}

		/* if the loop has been down for 4 minutes, reinit adapter */
		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
			if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
				ql_log(ql_log_warn, vha, 0x6009,
				    "Loop down - aborting ISP.\n");

				if (IS_QLA82XX(ha))
					set_bit(FCOE_CTX_RESET_NEEDED,
					    &vha->dpc_flags);
				else
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
			}
		}
		ql_dbg(ql_dbg_timer, vha, 0x600a,
		    "Loop down - seconds remaining %d.\n",
		    atomic_read(&vha->loop_down_timer));
	}

	/* Check if beacon LED needs to be blinked for physical host only */
	if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
		/* There is no beacon_blink function for ISP82xx */
		if (!IS_P3P_TYPE(ha)) {
			set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
			start_dpc++;
		}
	}

	/* check if edif running */
	if (vha->hw->flags.edif_enabled)
		qla_edif_timer(vha);

	/* Process any deferred work. */
	if (!list_empty(&vha->work_list)) {
		unsigned long flags;
		bool q = false;

		spin_lock_irqsave(&vha->work_lock, flags);
		if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
			q = true;
		spin_unlock_irqrestore(&vha->work_lock, flags);
		if (q)
			queue_work(vha->hw->wq, &vha->iocb_work);
	}

	/*
	 * FC-NVME
	 * see if the active AEN count has changed from what was last reported.
	 */
	index = atomic_read(&ha->nvme_active_aen_cnt);
	if (!vha->vp_idx &&
	    (index != ha->nvme_last_rptd_aen) &&
	    ha->zio_mode == QLA_ZIO_MODE_6 &&
	    !ha->flags.host_shutting_down) {
		ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
		ql_log(ql_log_info, vha, 0x3002,
		    "nvme: Sched: Set ZIO exchange threshold to %d.\n",
		    ha->nvme_last_rptd_aen);
		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
		start_dpc++;
	}

	if (!vha->vp_idx &&
	    atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
	    IS_ZIO_THRESHOLD_CAPABLE(ha)) {
		ql_log(ql_log_info, vha, 0x3002,
		    "Sched: Set ZIO exchange threshold to %d.\n",
		    ha->last_zio_threshold);
		ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
		start_dpc++;
	}

	/* borrowing w to signify dpc will run */
	w = 0;
	/* Schedule the DPC routine if needed */
	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
	    start_dpc ||
	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
	    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
	    test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
		ql_dbg(ql_dbg_timer, vha, 0x600b,
		    "isp_abort_needed=%d loop_resync_needed=%d "
		    "fcport_update_needed=%d start_dpc=%d "
		    "reset_marker_needed=%d",
		    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
		    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
		    start_dpc,
		    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
		ql_dbg(ql_dbg_timer, vha, 0x600c,
		    "beacon_blink_needed=%d isp_unrecoverable=%d "
		    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
		    "relogin_needed=%d, Process_purex_iocb=%d.\n",
		    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
		    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
		    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
		    test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
		    test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
		    test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
		qla2xxx_wake_dpc(vha);
		w = 1;
	}

	qla_heart_beat(vha, w);

	qla2x00_restart_timer(vha, WATCH_INTERVAL);
}

/* Firmware interface routines. */

#define FW_ISP21XX	0
#define FW_ISP22XX	1
#define FW_ISP2300	2
#define FW_ISP2322	3
#define FW_ISP24XX	4
#define FW_ISP25XX	5
#define FW_ISP81XX	6
#define FW_ISP82XX	7
#define FW_ISP2031	8
#define FW_ISP8031	9
#define FW_ISP27XX	10
#define FW_ISP28XX	11

#define FW_FILE_ISP21XX	"ql2100_fw.bin"
#define FW_FILE_ISP22XX	"ql2200_fw.bin"
#define FW_FILE_ISP2300	"ql2300_fw.bin"
#define FW_FILE_ISP2322	"ql2322_fw.bin"
#define FW_FILE_ISP24XX	"ql2400_fw.bin"
#define FW_FILE_ISP25XX	"ql2500_fw.bin"
#define FW_FILE_ISP81XX	"ql8100_fw.bin"
#define FW_FILE_ISP82XX	"ql8200_fw.bin"
#define FW_FILE_ISP2031	"ql2600_fw.bin"
#define FW_FILE_ISP8031	"ql8300_fw.bin"
#define FW_FILE_ISP27XX	"ql2700_fw.bin"
#define FW_FILE_ISP28XX	"ql2800_fw.bin"

static DEFINE_MUTEX(qla_fw_lock);

static struct fw_blob qla_fw_blobs[] = {
	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
	{ .name = FW_FILE_ISP24XX, },
	{ .name = FW_FILE_ISP25XX, },
	{ .name = FW_FILE_ISP81XX, },
	{ .name = FW_FILE_ISP82XX, },
	{ .name = FW_FILE_ISP2031, },
	{ .name = FW_FILE_ISP8031, },
	{ .name = FW_FILE_ISP27XX, },
	{ .name = FW_FILE_ISP28XX, },
	{ .name = NULL, },
};
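
/*
 * For the pre-ISP24xx entries the .segs array appears to list the RISC
 * load address of each firmware segment, zero-terminated (e.g. the ISP2322
 * image carries extra segments at 0x1c000 and 0x1e000); later ISPs use
 * self-describing firmware images, so no segment table is given.
 */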
struct fw_blob *
qla2x00_request_firmware(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct fw_blob *blob;

    if (IS_QLA2100(ha)) {
        blob = &qla_fw_blobs[FW_ISP21XX];
    } else if (IS_QLA2200(ha)) {
        blob = &qla_fw_blobs[FW_ISP22XX];
    } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
        blob = &qla_fw_blobs[FW_ISP2300];
    } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
        blob = &qla_fw_blobs[FW_ISP2322];
    } else if (IS_QLA24XX_TYPE(ha)) {
        blob = &qla_fw_blobs[FW_ISP24XX];
    } else if (IS_QLA25XX(ha)) {
        blob = &qla_fw_blobs[FW_ISP25XX];
    } else if (IS_QLA81XX(ha)) {
        blob = &qla_fw_blobs[FW_ISP81XX];
    } else if (IS_QLA82XX(ha)) {
        blob = &qla_fw_blobs[FW_ISP82XX];
    } else if (IS_QLA2031(ha)) {
        blob = &qla_fw_blobs[FW_ISP2031];
    } else if (IS_QLA8031(ha)) {
        blob = &qla_fw_blobs[FW_ISP8031];
    } else if (IS_QLA27XX(ha)) {
        blob = &qla_fw_blobs[FW_ISP27XX];
    } else if (IS_QLA28XX(ha)) {
        blob = &qla_fw_blobs[FW_ISP28XX];
    } else {
        return NULL;
    }

    if (!blob->name)
        return NULL;

    mutex_lock(&qla_fw_lock);
    if (blob->fw)
        goto out;

    if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
        ql_log(ql_log_warn, vha, 0x0063,
            "Failed to load firmware image (%s).\n", blob->name);
        blob->fw = NULL;
        blob = NULL;
    }

out:
    mutex_unlock(&qla_fw_lock);
    return blob;
}
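
/* Release every firmware image cached in qla_fw_blobs[]. */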
static void
qla2x00_release_firmware(void)
{
    struct fw_blob *blob;

    mutex_lock(&qla_fw_lock);
    for (blob = qla_fw_blobs; blob->name; blob++)
        release_firmware(blob->fw);
    mutex_unlock(&qla_fw_lock);
}
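
/*
 * Quiesce the adapter after a PCI error: bump the chip reset counter,
 * take all queue pairs offline, then mark all devices lost and clear
 * pending async login state on every fcport of the base port and all
 * vports.
 */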
static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
    struct qla_qpair *qpair = NULL;
    struct scsi_qla_host *vp, *tvp;
    fc_port_t *fcport;
    int i;
    unsigned long flags;

    ql_dbg(ql_dbg_aer, vha, 0x9000,
        "%s\n", __func__);
    ha->chip_reset++;

    ha->base_qpair->chip_reset = ha->chip_reset;
    for (i = 0; i < ha->max_qpairs; i++) {
        if (ha->queue_pair_map[i])
            ha->queue_pair_map[i]->chip_reset =
                ha->base_qpair->chip_reset;
    }

    /*
     * Purging the mailbox might take a while; slot reset/chip reset
     * will take care of the purge.
     */
    mutex_lock(&ha->mq_lock);
    ha->base_qpair->online = 0;
    list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
        qpair->online = 0;
    wmb();
    mutex_unlock(&ha->mq_lock);

    qla2x00_mark_all_devices_lost(vha);

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
        atomic_inc(&vp->vref_count);
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        qla2x00_mark_all_devices_lost(vp);
        spin_lock_irqsave(&ha->vport_slock, flags);
        atomic_dec(&vp->vref_count);
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    /* Clear all async request states across all VPs. */
    list_for_each_entry(fcport, &vha->vp_fcports, list)
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
        atomic_inc(&vp->vref_count);
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        list_for_each_entry(fcport, &vp->vp_fcports, list)
            fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
        spin_lock_irqsave(&ha->vport_slock, flags);
        atomic_dec(&vp->vref_count);
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);
}
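
/*
 * AER callback for a detected PCI error.  Reports to the PCI core
 * whether the error can be recovered in place, needs a slot reset, or
 * is permanent and the device must be disconnected.
 */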
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
    scsi_qla_host_t *vha = pci_get_drvdata(pdev);
    struct qla_hw_data *ha = vha->hw;
    pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;

    ql_log(ql_log_warn, vha, 0x9000,
        "PCI error detected, state %x.\n", state);
    ha->pci_error_state = QLA_PCI_ERR_DETECTED;

    if (!atomic_read(&pdev->enable_cnt)) {
        ql_log(ql_log_info, vha, 0xffff,
            "PCI device is disabled, state %x\n", state);
        ret = PCI_ERS_RESULT_NEED_RESET;
        goto out;
    }

    switch (state) {
    case pci_channel_io_normal:
        qla_pci_set_eeh_busy(vha);
        if (ql2xmqsupport || ql2xnvmeenable) {
            set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
        }
        ret = PCI_ERS_RESULT_CAN_RECOVER;
        break;
    case pci_channel_io_frozen:
        qla_pci_set_eeh_busy(vha);
        ret = PCI_ERS_RESULT_NEED_RESET;
        break;
    case pci_channel_io_perm_failure:
        ha->flags.pci_channel_io_perm_failure = 1;
        qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
        if (ql2xmqsupport || ql2xnvmeenable) {
            set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
        }
        ret = PCI_ERS_RESULT_DISCONNECT;
    }

out:
    ql_dbg(ql_dbg_aer, vha, 0x600d,
        "PCI error detected returning [%x].\n", ret);
    return ret;
}
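
/*
 * AER callback invoked once MMIO access is re-enabled.  Checks whether
 * the RISC is paused (dumping firmware if so) and, except on ISP82xx,
 * requests a slot reset to continue recovery.
 */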
static pci_ers_result_t
qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
    int risc_paused = 0;
    uint32_t stat;
    unsigned long flags;
    scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
    struct qla_hw_data *ha = base_vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;

    ql_log(ql_log_warn, base_vha, 0x9000,
        "mmio enabled\n");
    ha->pci_error_state = QLA_PCI_MMIO_ENABLED;

    if (IS_QLA82XX(ha))
        return PCI_ERS_RESULT_RECOVERED;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, base_vha, 0x803f,
            "During mmio enabled, PCI/Register disconnect still detected.\n");
        goto out;
    }

    spin_lock_irqsave(&ha->hardware_lock, flags);
    if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
        stat = rd_reg_word(&reg->hccr);
        if (stat & HCCR_RISC_PAUSE)
            risc_paused = 1;
    } else if (IS_QLA23XX(ha)) {
        stat = rd_reg_dword(&reg->u.isp2300.host_status);
        if (stat & HSR_RISC_PAUSED)
            risc_paused = 1;
    } else if (IS_FWI2_CAPABLE(ha)) {
        stat = rd_reg_dword(&reg24->host_status);
        if (stat & HSRX_RISC_PAUSED)
            risc_paused = 1;
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (risc_paused) {
        ql_log(ql_log_info, base_vha, 0x9003,
            "RISC paused -- mmio_enabled, Dumping firmware.\n");
        qla2xxx_dump_fw(base_vha);
    }

out:
    /* Set PCI_ERS_RESULT_NEED_RESET to trigger a call to qla2xxx_pci_slot_reset. */
    ql_dbg(ql_dbg_aer, base_vha, 0x600d,
        "mmio enabled returning.\n");
    return PCI_ERS_RESULT_NEED_RESET;
}
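
/*
 * AER callback invoked after the slot has been reset.  Re-enables and
 * reconfigures the PCI device, brings the queue pairs back online and
 * reinitializes the ISP; reports whether the adapter recovered.
 */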
static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
    pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
    scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
    struct qla_hw_data *ha = base_vha->hw;
    int rc;
    struct qla_qpair *qpair = NULL;

    ql_log(ql_log_warn, base_vha, 0x9004,
        "Slot Reset.\n");
    ha->pci_error_state = QLA_PCI_SLOT_RESET;

    /*
     * Workaround: the qla2xxx driver, which accesses hardware early in
     * recovery, needs the error state to be pci_channel_io_normal;
     * otherwise mailbox commands time out.
     */
    pdev->error_state = pci_channel_io_normal;

    pci_restore_state(pdev);

    /*
     * pci_restore_state() clears the saved_state flag of the device,
     * so save the restored state to set the flag again.
     */
    pci_save_state(pdev);

    if (ha->mem_only)
        rc = pci_enable_device_mem(pdev);
    else
        rc = pci_enable_device(pdev);
    if (rc) {
        ql_log(ql_log_warn, base_vha, 0x9005,
            "Can't re-enable PCI device after reset.\n");
        goto exit_slot_reset;
    }

    if (ha->isp_ops->pci_config(base_vha))
        goto exit_slot_reset;

    mutex_lock(&ha->mq_lock);
    list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
        qpair->online = 1;
    mutex_unlock(&ha->mq_lock);

    ha->flags.eeh_busy = 0;
    base_vha->flags.online = 1;
    set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
    ha->isp_ops->abort_isp(base_vha);
    clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

    if (qla2x00_isp_reg_stat(ha)) {
        ha->flags.eeh_busy = 1;
        qla_pci_error_cleanup(base_vha);
        ql_log(ql_log_warn, base_vha, 0x9005,
            "Device unable to recover from PCI error.\n");
    } else {
        ret = PCI_ERS_RESULT_RECOVERED;
    }

exit_slot_reset:
    ql_dbg(ql_dbg_aer, base_vha, 0x900e,
        "Slot Reset returning %x.\n", ret);

    return ret;
}
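
/*
 * AER callback invoked when normal I/O can resume.  Waits for the HBA
 * to come back online and records the new PCI error state.
 */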
static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
    scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
    struct qla_hw_data *ha = base_vha->hw;
    int ret;

    ql_log(ql_log_warn, base_vha, 0x900f,
        "Pci Resume.\n");

    ret = qla2x00_wait_for_hba_online(base_vha);
    if (ret != QLA_SUCCESS) {
        ql_log(ql_log_fatal, base_vha, 0x9002,
            "The device failed to resume I/O from slot/link_reset.\n");
    }
    ha->pci_error_state = QLA_PCI_RESUME;
    ql_dbg(ql_dbg_aer, base_vha, 0x600d,
        "Pci Resume returning.\n");
}
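
/*
 * Mark the adapter EEH-busy (double-checked under the base port's
 * work_lock so only the first caller wins) and quiesce it so no new
 * I/O is issued while PCI error recovery is in progress.
 */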
void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    bool do_cleanup = false;
    unsigned long flags;

    if (ha->flags.eeh_busy)
        return;

    spin_lock_irqsave(&base_vha->work_lock, flags);
    if (!ha->flags.eeh_busy) {
        ha->eeh_jif = jiffies;
        ha->flags.eeh_flush = 0;

        ha->flags.eeh_busy = 1;
        do_cleanup = true;
    }
    spin_unlock_irqrestore(&base_vha->work_lock, flags);

    if (do_cleanup)
        qla_pci_error_cleanup(base_vha);
}
/*
 * This routine schedules a task to pause I/O from interrupt context if
 * the caller sees a PCIe error event (register reads return all 0xf's).
 */
void qla_schedule_eeh_work(struct scsi_qla_host *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

    if (ha->flags.eeh_busy)
        return;

    set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
    qla2xxx_wake_dpc(base_vha);
}
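
/* Callback from the PCI core just before an FLR/function reset. */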
static void
qla_pci_reset_prepare(struct pci_dev *pdev)
{
    scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
    struct qla_hw_data *ha = base_vha->hw;
    struct qla_qpair *qpair;

    ql_log(ql_log_warn, base_vha, 0xffff,
        "%s.\n", __func__);

    /*
     * A PCI FLR/function reset is about to reset the slot.  Stop the
     * chip to stop all DMA access.  It is assumed that pci_reset_done()
     * will be called after the FLR to resume chip operation.
     */
    ha->flags.eeh_busy = 1;
    mutex_lock(&ha->mq_lock);
    list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
        qpair->online = 0;
    mutex_unlock(&ha->mq_lock);

    set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
    qla2x00_abort_isp_cleanup(base_vha);
    qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
}
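
/* Callback from the PCI core after an FLR/function reset completes. */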
static void
qla_pci_reset_done(struct pci_dev *pdev)
{
    scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
    struct qla_hw_data *ha = base_vha->hw;
    struct qla_qpair *qpair;

    ql_log(ql_log_warn, base_vha, 0xffff,
        "%s.\n", __func__);

    /* The FLR just completed in the PCI layer; resume the adapter. */
    ha->flags.eeh_busy = 0;
    mutex_lock(&ha->mq_lock);
    list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
        qpair->online = 1;
    mutex_unlock(&ha->mq_lock);

    base_vha->flags.online = 1;
    ha->isp_ops->abort_isp(base_vha);
    clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
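
/*
 * Map block-mq hardware queues to CPUs: fall back to the generic
 * mapping when interrupt affinity is user-controlled or MQ registers
 * are absent, otherwise map queues by PCI IRQ affinity.
 */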
static void qla2xxx_map_queues(struct Scsi_Host *shost)
{
    scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
    struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

    if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
        blk_mq_map_queues(qmap);
    else
        blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
    .module                     = THIS_MODULE,
    .name                       = QLA2XXX_DRIVER_NAME,
    .queuecommand               = qla2xxx_queuecommand,

    .eh_timed_out               = fc_eh_timed_out,
    .eh_abort_handler           = qla2xxx_eh_abort,
    .eh_should_retry_cmd        = fc_eh_should_retry_cmd,
    .eh_device_reset_handler    = qla2xxx_eh_device_reset,
    .eh_target_reset_handler    = qla2xxx_eh_target_reset,
    .eh_bus_reset_handler       = qla2xxx_eh_bus_reset,
    .eh_host_reset_handler      = qla2xxx_eh_host_reset,

    .slave_configure            = qla2xxx_slave_configure,

    .slave_alloc                = qla2xxx_slave_alloc,
    .slave_destroy              = qla2xxx_slave_destroy,
    .scan_finished              = qla2xxx_scan_finished,
    .scan_start                 = qla2xxx_scan_start,
    .change_queue_depth         = scsi_change_queue_depth,
    .map_queues                 = qla2xxx_map_queues,
    .this_id                    = -1,
    .cmd_per_lun                = 3,
    .sg_tablesize               = SG_ALL,

    .max_sectors                = 0xFFFF,
    .shost_groups               = qla2x00_host_groups,

    .supported_mode             = MODE_INITIATOR,
    .track_queue_depth          = 1,
    .cmd_size                   = sizeof(srb_t),
};
static const struct pci_error_handlers qla2xxx_err_handler = {
    .error_detected = qla2xxx_pci_error_detected,
    .mmio_enabled   = qla2xxx_pci_mmio_enabled,
    .slot_reset     = qla2xxx_pci_slot_reset,
    .resume         = qla2xxx_pci_resume,
    .reset_prepare  = qla_pci_reset_prepare,
    .reset_done     = qla_pci_reset_done,
};
static struct pci_device_id qla2xxx_pci_tbl[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
    { 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
static struct pci_driver qla2xxx_pci_driver = {
    .name           = QLA2XXX_DRIVER_NAME,
    .driver         = {
        .owner      = THIS_MODULE,
    },
    .id_table       = qla2xxx_pci_tbl,
    .probe          = qla2x00_probe_one,
    .remove         = qla2x00_remove_one,
    .shutdown       = qla2x00_shutdown,
    .err_handler    = &qla2xxx_err_handler,
};

static const struct file_operations apidev_fops = {
    .owner  = THIS_MODULE,
    .llseek = noop_llseek,
};
/**
 * qla2x00_module_init - Module initialization.
 */
static int __init
qla2x00_module_init(void)
{
    int ret = 0;
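
    /*
     * Compile-time sanity checks: IOCB, register-map and firmware-dump
     * structures must match the sizes the firmware interface expects.
     */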
    BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
    BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
    BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
    BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
    BUILD_BUG_ON(sizeof(init_cb_t) != 96);
    BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
    BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
    BUILD_BUG_ON(sizeof(request_t) != 64);
    BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
    BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
    BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
    BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
    BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
    BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
    BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
    BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
    BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
    BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
    BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
    BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
    BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
    BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
    BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
    BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
    BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
    BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
    BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
    BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
    BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
    BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
    BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
    BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
    BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
    BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
    BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
    BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
    BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
    BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
    BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
    BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
    BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
    BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
    BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
    BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
    BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
    BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
    BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
    BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
    BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
    BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
    BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
    BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
    BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
    BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
    BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
    BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
    BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
    BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
    BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
    BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
    BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
    BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
    BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
    BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
    BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
    BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
    BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
    BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
    BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
    BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
    BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
    BUILD_BUG_ON(sizeof(sw_info_t) != 32);
    BUILD_BUG_ON(sizeof(target_id_t) != 2);
    qla_trace_init();

    /* Allocate cache for SRBs. */
    srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
        SLAB_HWCACHE_ALIGN, NULL);
    if (srb_cachep == NULL) {
        ql_log(ql_log_fatal, NULL, 0x0001,
            "Unable to allocate SRB cache...Failing load!.\n");
        return -ENOMEM;
    }

    /* Initialize target kmem_cache and mem_pools */
    ret = qlt_init();
    if (ret < 0) {
        goto destroy_cache;
    } else if (ret > 0) {
        /*
         * If initiator mode is explicitly disabled by qlt_init(),
         * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
         * performing scsi_scan_target() during LOOP UP event.
         */
        qla2xxx_transport_functions.disable_target_scan = 1;
        qla2xxx_transport_vport_functions.disable_target_scan = 1;
    }

    /* Derive version string. */
    strcpy(qla2x00_version_str, QLA2XXX_VERSION);
    if (ql2xextended_error_logging)
        strcat(qla2x00_version_str, "-debug");
    if (ql2xextended_error_logging == 1)
        ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;

    qla2xxx_transport_template =
        fc_attach_transport(&qla2xxx_transport_functions);
    if (!qla2xxx_transport_template) {
        ql_log(ql_log_fatal, NULL, 0x0002,
            "fc_attach_transport failed...Failing load!.\n");
        ret = -ENODEV;
        goto qlt_exit;
    }

    apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
    if (apidev_major < 0) {
        ql_log(ql_log_fatal, NULL, 0x0003,
            "Unable to register char device %s.\n", QLA2XXX_APIDEV);
    }

    qla2xxx_transport_vport_template =
        fc_attach_transport(&qla2xxx_transport_vport_functions);
    if (!qla2xxx_transport_vport_template) {
        ql_log(ql_log_fatal, NULL, 0x0004,
            "fc_attach_transport vport failed...Failing load!.\n");
        ret = -ENODEV;
        goto unreg_chrdev;
    }
    ql_log(ql_log_info, NULL, 0x0005,
        "QLogic Fibre Channel HBA Driver: %s.\n",
        qla2x00_version_str);

    ret = pci_register_driver(&qla2xxx_pci_driver);
    if (ret) {
        ql_log(ql_log_fatal, NULL, 0x0006,
            "pci_register_driver failed...ret=%d Failing load!.\n",
            ret);
        goto release_vport_transport;
    }
    return ret;

release_vport_transport:
    fc_release_transport(qla2xxx_transport_vport_template);

unreg_chrdev:
    if (apidev_major >= 0)
        unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
    fc_release_transport(qla2xxx_transport_template);

qlt_exit:
    qlt_exit();

destroy_cache:
    kmem_cache_destroy(srb_cachep);

    qla_trace_uninit();
    return ret;
}
/**
 * qla2x00_module_exit - Module cleanup.
 */
static void __exit
qla2x00_module_exit(void)
{
    pci_unregister_driver(&qla2xxx_pci_driver);
    qla2x00_release_firmware();
    kmem_cache_destroy(ctx_cachep);
    fc_release_transport(qla2xxx_transport_vport_template);
    if (apidev_major >= 0)
        unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
    fc_release_transport(qla2xxx_transport_template);
    qlt_exit();
    kmem_cache_destroy(srb_cachep);
    qla_trace_uninit();
}
module_init(qla2x00_module_init);
module_exit(qla2x00_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);