qla_isr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
	struct purex_item *item);
static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
	uint16_t size);
static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
	void *pkt);
static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp);
static void
qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
{
	void *pkt = &item->iocb;
	uint16_t pkt_size = item->size;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
	    "%s: Enter\n", __func__);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
	    "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
	    pkt, pkt_size);

	fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
}
const char *const port_state_str[] = {
	[FCS_UNKNOWN]		= "Unknown",
	[FCS_UNCONFIGURED]	= "UNCONFIGURED",
	[FCS_DEVICE_DEAD]	= "DEAD",
	[FCS_DEVICE_LOST]	= "LOST",
	[FCS_ONLINE]		= "ONLINE"
};
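
/*
 * qla24xx_process_abts() - respond to an unsolicited ABTS received from
 * the fabric. The handler works in two steps: it first sends an ELS IOCB
 * telling the firmware to terminate the aborted exchange, then builds and
 * sends a BA_ACC ABTS response back to the originator, reusing the same
 * DMA buffer for both IOCBs.
 */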
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
	struct abts_entry_24xx *abts =
	    (struct abts_entry_24xx *)&pkt->iocb;
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	    abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
	    GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		    "Failed allocate dma buffer ABTS/ELS RSP.\n");
		return;
	}

	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->nport_handle = cpu_to_le16(~0);
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
	ql_dbg(ql_dbg_init, vha, 0x0283,
	    "Sending ELS Response to terminate exchange %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    (uint8_t *)rsp_els, sizeof(*rsp_els));

	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		    "%s: abort exchange done.\n", __func__);
	}

	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	/* swap source and destination IDs for the reply */
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/* include flipping bit23 in fctl */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	    "Sending BA ACC response to ABTS %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));

	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, abts_rsp->comp_status,
		    abts_rsp->payload.error.subcode1,
		    abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028e,
		    "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}
/**
 * __qla_consume_iocb - tell the fw that the driver has processed or
 * consumed the head IOCB, along with its continuation IOCBs, from the
 * provided response queue.
 * @vha: host adapter pointer
 * @pkt: pointer to current packet. On return, this pointer shall move
 * to the next packet.
 * @rsp: response queue pointer.
 *
 * it is assumed pkt is the head iocb, not a continuation iocb
 */
void __qla_consume_iocb(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp)
{
	struct rsp_que *rsp_q = *rsp;
	response_t *new_pkt;
	uint16_t entry_count_remaining;
	struct purex_entry_24xx *purex = *pkt;

	entry_count_remaining = purex->entry_count;
	while (entry_count_remaining > 0) {
		new_pkt = rsp_q->ring_ptr;
		*pkt = new_pkt;

		rsp_q->ring_index++;
		if (rsp_q->ring_index == rsp_q->length) {
			rsp_q->ring_index = 0;
			rsp_q->ring_ptr = rsp_q->ring;
		} else {
			rsp_q->ring_ptr++;
		}

		new_pkt->signature = RESPONSE_PROCESSED;
		/* flush signature */
		wmb();
		--entry_count_remaining;
	}
}
/**
 * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
 * and save to provided buffer
 * @vha: host adapter pointer
 * @pkt: pointer to Purex IOCB
 * @rsp: response queue
 * @buf: buffer to copy the extracted ELS payload into
 * @buf_len: buffer length
 */
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count_remaining;
	u16 tpad;

	entry_count_remaining = purex->entry_count;
	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;

	/*
	 * The payload may not end on a 4-byte boundary. Round up / pad
	 * so there is room for the byte swap before saving the data.
	 */
	tpad = roundup(total_bytes, 4);

	if (buf_len < tpad) {
		ql_dbg(ql_dbg_async, vha, 0x5084,
		    "%s buffer is too small %d < %d\n",
		    __func__, buf_len, tpad);
		__qla_consume_iocb(vha, pkt, rsp);
		return -EIO;
	}

	pending_bytes = total_bytes = tpad;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;

	memcpy(buf, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	/* flush signature */
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			/* flush signature */
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			return -EIO;
		}
	} while (entry_count_remaining > 0);

	/* payload arrives big-endian; convert in place for the caller */
	be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);

	return 0;
}
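
/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * from a response-queue handler that has identified a Purex IOCB, copy
 * the ELS payload out and advance past the head and continuation
 * entries in one call:
 *
 *	u8 payload[256];	// assumed size; must be >= roundup(frame, 4)
 *
 *	if (__qla_copy_purex_to_buffer(vha, &pkt, &rsp, payload,
 *	    sizeof(payload)) == 0) {
 *		// payload now holds the CPU-endian ELS frame body
 *	}
 */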
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = rd_reg_word(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared. Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
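
/*
 * A PCI read of a surprise-removed device returns all ones, so a register
 * value of 0xffffffff (while the channel is not already reported offline)
 * is treated as a disconnect, and EEH recovery work is scheduled unless
 * the driver is already probing or being removed.
 */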
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			qla_schedule_eeh_work(vha);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_word(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared. Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = MAILBOX_REG(ha, reg, 8);
		/* registers 4 and 5 are debounced to guard against flaky reads */
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
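
/*
 * qla81xx_idc_event() - handle Inter-Driver Communication (IDC) async
 * events. Mailbox registers 1-7 carry the IDC payload; a NOTIFY event
 * with a non-zero timeout must be acknowledged, and a TIME_EXT event
 * extends the acknowledgement timeout.
 */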
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char	*event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	__le16 __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = rd_reg_word(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
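
/*
 * Firmware reports link speed as an index into the table below; the
 * special code 0x13 maps to the last entry ("10", i.e. 10 Gbps), and
 * ISP2100/2200 parts are fixed at 1 Gbps.
 */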
#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "64", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *		Error-level 0x1 = Non-Fatal error
			 *		Error-level 0x2 = Recoverable Fatal error
			 *		Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *		SFP Status 0x0 = SFP+ transceiver not expected
			 *		SFP Status 0x1 = SFP+ transceiver not present
			 *		SFP Status 0x2 = SFP+ transceiver invalid
			 *		SFP Status 0x3 = SFP+ transceiver present and valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *		SFP info 0x0 = Unrecognized transceiver for Ethernet
			 *		SFP info 0x1 = SFP+ brand validation failed
			 *		SFP info 0x2 = SFP+ speed validation failed
			 *		SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *		DCBX Status 0x0 = DCBX Disabled
			 *		DCBX Status 0x1 = DCBX Enabled
			 *		DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x, "
			    "sfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}
/* Shall be called only on supported adapters. */
static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;
	bool reset_isp_needed = false;

	ql_log(ql_log_warn, vha, 0x02f0,
	    "MPI Heartbeat stop. MPI reset is%s needed. "
	    "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
	    mb[1] & BIT_8 ? "" : " not",
	    mb[0], mb[1], mb[2], mb[3]);

	if ((mb[1] & BIT_8) == 0)
		return;

	ql_log(ql_log_warn, vha, 0x02f1,
	    "MPI Heartbeat stop. FW dump needed\n");

	if (ql2xfulldump_on_mpifail) {
		ha->isp_ops->fw_dump(vha);
		reset_isp_needed = true;
	}

	ha->isp_ops->mpi_fw_dump(vha, 1);

	if (reset_isp_needed) {
		vha->hw->flags.fw_init_done = 0;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
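
/*
 * Purex items are allocated from atomic (response-path) context, hence
 * GFP_ATOMIC. Standard-sized payloads reuse the per-host default_item
 * when it is free; larger payloads get a kzalloc'd item sized for the
 * extra bytes beyond QLA_DEFAULT_PAYLOAD_SIZE.
 */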
static struct purex_item *
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
	struct purex_item *item = NULL;
	uint8_t item_hdr_size = sizeof(*item);

	if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
		item = kzalloc(item_hdr_size +
		    (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
	} else {
		if (atomic_inc_return(&vha->default_item.in_use) == 1) {
			item = &vha->default_item;
			goto initialize_purex_header;
		} else {
			item = kzalloc(item_hdr_size, GFP_ATOMIC);
		}
	}
	if (!item) {
		ql_log(ql_log_warn, vha, 0x5092,
		    ">> Failed allocate purex list item.\n");

		return NULL;
	}

initialize_purex_header:
	item->vha = vha;
	item->size = size;
	return item;
}
static void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
			 void (*process_item)(struct scsi_qla_host *vha,
					      struct purex_item *pkt))
{
	struct purex_list *list = &vha->purex_list;
	ulong flags;

	pkt->process_item = process_item;

	spin_lock_irqsave(&list->lock, flags);
	list_add_tail(&pkt->list, &list->head);
	spin_unlock_irqrestore(&list->lock, flags);

	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
}
/**
 * qla24xx_copy_std_pkt() - Copy over a purex ELS which is
 * contained in a single IOCB.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 */
static struct purex_item
*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
	struct purex_item *item;

	item = qla24xx_alloc_purex_item(vha,
					QLA_DEFAULT_PAYLOAD_SIZE);
	if (!item)
		return item;

	memcpy(&item->iocb, pkt, sizeof(item->iocb));
	return item;
}
/**
 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
 * span over multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 */
static struct purex_item *
qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
		      struct rsp_que **rsp)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *fpin_pkt = NULL;

	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;
	pending_bytes = total_bytes;
	entry_count = entry_count_remaining = purex->entry_count;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;
	ql_log(ql_log_info, vha, 0x509a,
	    "FPIN ELS, frame_size 0x%x, entry count %d\n",
	    total_bytes, entry_count);

	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	fpin_pkt = &item->iocb;

	memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				    "Ran out of IOCBs, partial data 0x%x\n",
				    buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);

	host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
	return item;
}
  983. /**
  984. * qla2x00_async_event() - Process aynchronous events.
  985. * @vha: SCSI driver HA context
  986. * @rsp: response queue
  987. * @mb: Mailbox registers (0 - 3)
  988. */
  989. void
  990. qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
  991. {
  992. uint16_t handle_cnt;
  993. uint16_t cnt, mbx;
  994. uint32_t handles[5];
  995. struct qla_hw_data *ha = vha->hw;
  996. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  997. struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
  998. struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
  999. uint32_t rscn_entry, host_pid;
  1000. unsigned long flags;
  1001. fc_port_t *fcport = NULL;
  1002. if (!vha->hw->flags.fw_started) {
  1003. ql_log(ql_log_warn, vha, 0x50ff,
  1004. "Dropping AEN - %04x %04x %04x %04x.\n",
  1005. mb[0], mb[1], mb[2], mb[3]);
  1006. return;
  1007. }
  1008. /* Setup to process RIO completion. */
  1009. handle_cnt = 0;
  1010. if (IS_CNA_CAPABLE(ha))
  1011. goto skip_rio;
  1012. switch (mb[0]) {
  1013. case MBA_SCSI_COMPLETION:
  1014. handles[0] = make_handle(mb[2], mb[1]);
  1015. handle_cnt = 1;
  1016. break;
  1017. case MBA_CMPLT_1_16BIT:
  1018. handles[0] = mb[1];
  1019. handle_cnt = 1;
  1020. mb[0] = MBA_SCSI_COMPLETION;
  1021. break;
  1022. case MBA_CMPLT_2_16BIT:
  1023. handles[0] = mb[1];
  1024. handles[1] = mb[2];
  1025. handle_cnt = 2;
  1026. mb[0] = MBA_SCSI_COMPLETION;
  1027. break;
  1028. case MBA_CMPLT_3_16BIT:
  1029. handles[0] = mb[1];
  1030. handles[1] = mb[2];
  1031. handles[2] = mb[3];
  1032. handle_cnt = 3;
  1033. mb[0] = MBA_SCSI_COMPLETION;
  1034. break;
  1035. case MBA_CMPLT_4_16BIT:
  1036. handles[0] = mb[1];
  1037. handles[1] = mb[2];
  1038. handles[2] = mb[3];
  1039. handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
  1040. handle_cnt = 4;
  1041. mb[0] = MBA_SCSI_COMPLETION;
  1042. break;
  1043. case MBA_CMPLT_5_16BIT:
  1044. handles[0] = mb[1];
  1045. handles[1] = mb[2];
  1046. handles[2] = mb[3];
  1047. handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
  1048. handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
  1049. handle_cnt = 5;
  1050. mb[0] = MBA_SCSI_COMPLETION;
  1051. break;
  1052. case MBA_CMPLT_2_32BIT:
  1053. handles[0] = make_handle(mb[2], mb[1]);
  1054. handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
  1055. RD_MAILBOX_REG(ha, reg, 6));
  1056. handle_cnt = 2;
  1057. mb[0] = MBA_SCSI_COMPLETION;
  1058. break;
  1059. default:
  1060. break;
  1061. }
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;
	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;
	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = 0;

		vha->hw_err_cnt++;

		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			u16 m[4];

			m[0] = rd_reg_word(&reg24->mailbox4);
			m[1] = rd_reg_word(&reg24->mailbox5);
			m[2] = rd_reg_word(&reg24->mailbox6);
			mbx = m[3] = rd_reg_word(&reg24->mailbox7);

			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
			    mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
		} else
			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
			    mb[1], mb[2], mb[3]);
		if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    rd_reg_word(&reg24->mailbox7) & BIT_8)
			ha->isp_ops->mpi_fw_dump(vha, 1);
		ha->isp_ops->fw_dump(vha);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;
	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;
	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mb[2] & BIT_0)
				ql_log(ql_log_info, vha, 0x11a0,
				    "FEC=enabled (link up).\n");
		}

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (vha->link_down_time < vha->hw->port_down_retry_count) {
			vha->short_link_down_cnt++;
			vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}

		break;
	case MBA_LOOP_DOWN:		/* Loop Down Event */
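		/*
		 * Note the topology before it is cleared below; SAVE_TOPO()
		 * preserves the last known value.
		 */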
		SAVE_TOPO(ha);
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;
		vha->link_down_time = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? rd_reg_word(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					memcpy(vha->port_name, ha->port_name, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;
	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;
	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			if (!N2N_TOPO(ha))
				qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;
	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;
	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *       vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);
			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;
	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_log(ql_log_warn, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
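		/*
		 * mb[1] (low byte) carries the affected port's domain and
		 * mb[2] the area/al_pa, forming a 24-bit FC port ID.
		 */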
  1407. rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
  1408. host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
  1409. | vha->d_id.b.al_pa;
  1410. if (rscn_entry == host_pid) {
  1411. ql_dbg(ql_dbg_async, vha, 0x5014,
  1412. "Ignoring RSCN update to local host "
  1413. "port ID (%06x).\n", host_pid);
  1414. break;
  1415. }
  1416. /* Ignore reserved bits from RSCN-payload. */
  1417. rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
  1418. /* Skip RSCNs for virtual ports on the same physical port */
  1419. if (qla2x00_is_a_vp_did(vha, rscn_entry))
  1420. break;
  1421. atomic_set(&vha->loop_down_timer, 0);
  1422. vha->flags.management_server_logged_in = 0;
  1423. {
  1424. struct event_arg ea;
  1425. memset(&ea, 0, sizeof(ea));
  1426. ea.id.b24 = rscn_entry;
  1427. ea.id.b.rsvd_1 = rscn_entry >> 24;
  1428. qla2x00_handle_rscn(vha, &ea);
  1429. qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
  1430. }
  1431. break;
	case MBA_CONGN_NOTI_RECV:
		if (!ha->flags.scm_enabled ||
		    mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
			break;

		if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
			ql_dbg(ql_dbg_async, vha, 0x509b,
			    "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
		} else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
			ql_log(ql_log_warn, vha, 0x509b,
			    "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;
	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		vha->interface_err_cnt++;
		break;
	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;
	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		fallthrough;
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		fallthrough;
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	case MBA_IDC_AEN:
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			vha->hw_err_cnt++;
			qla27xx_handle_8200_aen(vha, mb);
		} else if (IS_QLA83XX(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			mb[5] = rd_reg_word(&reg24->mailbox5);
			mb[6] = rd_reg_word(&reg24->mailbox6);
			mb[7] = rd_reg_word(&reg24->mailbox7);
			qla83xx_handle_8200_aen(vha, mb);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
			    mb[0], mb[1], mb[2], mb[3]);
		}
		break;
	case MBA_DPORT_DIAGNOSTICS:
		if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
		    (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
			vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
		memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			static char *results[] = {
			    "start", "done(pass)", "done(error)", "undefined" };
			static char *types[] = {
			    "none", "dynamic", "static", "other" };
			uint result = mb[1] >> 0 & 0x3;
			uint type = mb[1] >> 6 & 0x3;
			uint sw = mb[1] >> 15 & 0x1;

			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
			    results[result], types[type], sw);
			if (result == 2) {
				static char *reasons[] = {
				    "reserved", "unexpected reject",
				    "unexpected phase", "retry exceeded",
				    "timed out", "not supported",
				    "user stopped" };
				uint reason = mb[2] >> 0 & 0xf;
				uint phase = mb[2] >> 12 & 0xf;

				ql_dbg(ql_dbg_async, vha, 0x5052,
				    "D-Port Diagnostics: reason=%s phase=%u\n",
				    reason < 7 ? reasons[reason] : "other",
				    phase >> 1);
			}
		}
		break;
	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		break;
	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
		break;
	case MBA_TRANS_REMOVE:
		ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				  struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

static srb_t *
qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
		       struct req_que *req, void *iocb, u16 *ret_index)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp;
	uint16_t index;
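
	/* IOCBs posted with QLA_SKIP_HANDLE carry no SRB to complete. */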
	if (pkt->handle == QLA_SKIP_HANDLE)
		return NULL;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "%s: Invalid command index (%x) type %8ph.\n",
		    func, index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return NULL;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "%s: Invalid completion handle (%x) -- timed-out.\n",
		    func, index);
		return NULL;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "%s: SRB handle (%x) mismatch %x.\n", func,
		    sp->handle, index);
		return NULL;
	}

	*ret_index = index;
	qla_put_fw_resources(sp->qpair, &sp->iores);
	return sp;
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
			   struct req_que *req, void *iocb)
{
	uint16_t index;
	srb_t *sp;

	sp = qla_get_sp_from_handle(vha, func, req, iocb, &index);
	if (sp)
		req->outstanding_cmds[index] = NULL;

	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
		       struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
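	/*
	 * A login IOCB can report status 0x30 while mb0 still says
	 * MBS_COMMAND_COMPLETE; treat that combination as success.
	 */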
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
		       struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_SCSI_CMD ||
	    sp->type == SRB_NVME_CMD ||
	    sp->type == SRB_TM_CMD) {
		ql_log(ql_log_warn, vha, 0x509d,
		    "Inconsistent event entry type %d\n", sp->type);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
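	/* Copy no more mailboxes than both the IOCB and the SRB can hold. */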
	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = pkt->mb[i];

	res = (si->u.mbx.in_mb[0] & MBS_MASK);
	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
			 struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
		 sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(pkt->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
		     struct sts_entry_24xx *pkt, int iocb_type)
{
	struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res, logit = 1;
	struct srb_iocb *els;
	uint n;
	scsi_qla_host_t *vha;
	struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;

	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;
	vha = sp->vha;

	type = NULL;

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "rpt hst";
		break;
	case SRB_ELS_CMD_HST_NOLOGIN:
		type = "els";
		{
			struct els_entry_24xx *els = (void *)pkt;
			struct qla_bsg_auth_els_request *p =
				(struct qla_bsg_auth_els_request *)bsg_job->request;

			ql_dbg(ql_dbg_user, vha, 0x700f,
			    "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
			    __func__, sc_to_str(p->e.sub_cmd),
			    e->d_id[2], e->d_id[1], e->d_id[0],
			    comp_status, p->e.extra_rx_xchg_address, bsg_job);

			if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
				if (sp->remap.remapped) {
					n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    sp->remap.rsp.buf,
					    sp->remap.rsp.len);
					ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
					    "%s: SG copied %x of %x\n",
					    __func__, n, sp->remap.rsp.len);
				} else {
					ql_dbg(ql_dbg_user, vha, 0x700f,
					    "%s: NOT REMAPPED (error)...!!!\n",
					    __func__);
				}
			}
		}
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
		els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
		els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
		els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
					ese->total_byte_count));

				if (sp->remap.remapped &&
				    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
					    __func__, e->s_id[0], e->s_id[2], e->s_id[1],
					    e->d_id[2], e->d_id[1], e->d_id[0]);
					logit = 0;
				}
			} else if (comp_status == CS_PORT_LOGGED_OUT) {
				ql_dbg(ql_dbg_disc, vha, 0x911e,
				    "%s %d schedule session deletion\n",
				    __func__, __LINE__);

				els->u.els_plogi.len = 0;
				res = DID_IMM_RETRY << 16;
				qlt_schedule_sess_for_deletion(sp->fcport);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}

			if (sp->remap.remapped &&
			    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
				if (logit) {
					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
					    type, sp->handle, comp_status);
					ql_dbg(ql_dbg_user, vha, 0x503f,
					    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
					    fw_status[1], fw_status[2],
					    le32_to_cpu(((struct els_sts_entry_24xx *)
						pkt)->total_byte_count),
					    e->s_id[0], e->s_id[2], e->s_id[1],
					    e->d_id[2], e->d_id[1], e->d_id[0]);
				}
				if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
				    sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
					ql_dbg(ql_dbg_edif, vha, 0x911e,
					    "%s rcv reject. Sched delete\n", __func__);
					qlt_schedule_sess_for_deletion(sp->fcport);
				}
			} else if (logit) {
				ql_log(ql_log_info, vha, 0x503f,
				    "%s IOCB Done hdl=%x comp_status=0x%x\n",
				    type, sp->handle, comp_status);
				ql_log(ql_log_info, vha, 0x503f,
				    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
				    fw_status[1], fw_status[2],
				    le32_to_cpu(((struct els_sts_entry_24xx *)
					pkt)->total_byte_count),
				    e->s_id[0], e->s_id[2], e->s_id[1],
				    e->d_id[2], e->d_id[1], e->d_id[0]);
			}
		}
		goto els_ct_done;
	}

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
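	/*
	 * reply_len reserves room for the three firmware status words after
	 * the fc_bsg_reply; on errors they are copied in below so user space
	 * can inspect the ELS/CT error subcodes.
	 */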
	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le32_to_cpu(ese->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le32_to_cpu(ese->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le32_to_cpu(ese->error_subcode_1),
			    le32_to_cpu(ese->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		       fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:
	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
		    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];
	int logit = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, sp->vha, 0x5036,
		    "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;

		if (sp->type == SRB_PRLI_CMD) {
			lio->u.logio.iop[0] =
			    le32_to_cpu(logio->io_parameter[0]);
			lio->u.logio.iop[1] =
			    le32_to_cpu(logio->io_parameter[1]);
			goto logio_done;
		}

		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
		if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
			fcport->flags |= FCF_FCSP_DEVICE;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		logit = 0;
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		logit = 0;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");
			vha->hw->exch_starvation = 0;
			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		fallthrough;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	if (logit)
		ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
		    "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
		    le16_to_cpu(logio->comp_status),
		    le32_to_cpu(logio->io_parameter[0]),
		    le32_to_cpu(logio->io_parameter[1]));
	else
		ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
		    "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
		    le16_to_cpu(logio->comp_status),
		    le32_to_cpu(logio->io_parameter[0]),
		    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	u16 comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	comp_status = le16_to_cpu(sts->comp_status);
	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		host_to_fcp_swap(sts->data, sizeof(sts->data));
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	switch (comp_status) {
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_RESET:
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa,
			    port_state_str[FCS_ONLINE],
			    comp_status);

			qlt_schedule_sess_for_deletion(fcport);
		}
		break;

	default:
		break;
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
		    sts, sizeof(*sts));

	sp->done(sp, 0);
}

static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
	fc_port_t *fcport;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = QLA_SUCCESS;
	__le16 comp_status = sts->comp_status;
	int logit = 0;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = comp_status;
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;

	if (unlikely(iocb->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
	else
		sp->qpair->cmd_completion_cnt++;

	if (unlikely(comp_status != CS_COMPLETE))
		logit = 1;
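
	/* residual_len is the byte count the firmware did not transfer. */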
	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	/*
	 * State flags: Bit 6 and 0.
	 * If 0 is set, we don't care about 6.
	 * both cases resp was dma'd to host buffer
	 * if both are 0, that is good path case.
	 * if six is set and 0 is clear, we need to
	 * copy resp data from status iocb to resp buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
			(SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
		/* Response already DMA'd to fd->rspaddr. */
		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		/*
		 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
		 * as an error.
		 */
		iocb->u.nvme.rsp_pyld_len = 0;
		fd->transferred_length = 0;
		ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
		    "Unexpected values in NVMe_RSP IU.\n");
		logit = 1;
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;

		if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
		    sizeof(struct nvme_fc_ersp_iu))) {
			if (ql_mask_match(ql_dbg_io)) {
				WARN_ONCE(1, "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
				ql_log(ql_log_warn, fcport->vha, 0x5100,
				    "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
			}
			iocb->u.nvme.rsp_pyld_len =
				cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
		}
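
		/*
		 * Copy the ERSP IU into the transport's response buffer,
		 * byte-swapping each 32-bit word.
		 */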
		iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	}

	if (state_flags & SF_NVME_ERSP) {
		struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
		u32 tgt_xfer_len;

		tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
		if (fd->transferred_length != tgt_xfer_len) {
			ql_log(ql_log_warn, fcport->vha, 0x3079,
			    "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
			    tgt_xfer_len, fd->transferred_length);
			logit = 1;
		} else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
			/*
			 * Do not log if this is just an underflow and there
			 * is no data loss.
			 */
			logit = 0;
		}
	}

	if (unlikely(logit))
		ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
		    "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
		    sp->name, sp->handle, comp_status,
		    fd->transferred_length, le32_to_cpu(sts->residual_len),
		    sts->ox_id);

	/*
	 * If transport error then Failure (HBA rejects request)
	 * otherwise transport will handle.
	 */
	switch (le16_to_cpu(comp_status)) {
	case CS_COMPLETE:
		break;
	case CS_RESET:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
		fcport->nvme_flag |= NVME_FLAG_RESETTING;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "Port to be marked lost on fcport=%06x, current "
			    "port state= %s comp_status %x.\n",
			    fcport->d_id.b24, port_state_str[FCS_ONLINE],
			    comp_status);

			qlt_schedule_sess_for_deletion(fcport);
		}
		fallthrough;
	case CS_ABORTED:
	case CS_PORT_BUSY:
		fd->transferred_length = 0;
		iocb->u.nvme.rsp_pyld_len = 0;
		ret = QLA_ABORTED;
		break;
	case CS_DATA_UNDERRUN:
		break;
	default:
		ret = QLA_FUNCTION_FAILED;
		break;
	}
	sp->done(sp, ret);
}

static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
    struct vp_ctrl_entry_24xx *vce)
{
	const char func[] = "CTRLVP-IOCB";
	srb_t *sp;
	int rval = QLA_SUCCESS;

	sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
	if (!sp)
		return;

	if (vce->entry_status != 0) {
		ql_dbg(ql_dbg_vport, vha, 0x10c4,
		    "%s: Failed to complete IOCB -- error status (%x)\n",
		    sp->name, vce->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_vport, vha, 0x10c5,
		    "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
		    sp->name, le16_to_cpu(vce->comp_status),
		    le16_to_cpu(vce->vp_idx_failed));
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_vport, vha, 0x10c6,
		    "Done %s.\n", __func__);
	}

	sp->rc = rval;
	sp->done(sp, rval);
}

/* Process a single response queue entry. */
static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
					   struct rsp_que *rsp,
					   sts_entry_t *pkt)
{
	sts21_entry_t *sts21_entry;
	sts22_entry_t *sts22_entry;
	uint16_t handle_cnt;
	uint16_t cnt;

	switch (pkt->entry_type) {
	case STATUS_TYPE:
		qla2x00_status_entry(vha, rsp, pkt);
		break;
	case STATUS_TYPE_21:
		sts21_entry = (sts21_entry_t *)pkt;
		handle_cnt = sts21_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    sts21_entry->handle[cnt]);
		break;
	case STATUS_TYPE_22:
		sts22_entry = (sts22_entry_t *)pkt;
		handle_cnt = sts22_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    sts22_entry->handle[cnt]);
		break;
	case STATUS_CONT_TYPE:
		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
		break;
	case MBX_IOCB_TYPE:
		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
		break;
	case CT_IOCB_TYPE:
		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
		break;
	default:
		/* Type Not Supported. */
		ql_log(ql_log_warn, vha, 0x504a,
		    "Received unknown response pkt type %x entry status=%x.\n",
		    pkt->entry_type, pkt->entry_status);
		break;
	}
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		qla2x00_process_response_entry(vha, rsp, pkt);
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;
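
	/*
	 * par_sense_len is how much sense data this IOCB can carry; any
	 * remainder arrives in status-continuation entries, so
	 * rsp->status_srb is set below to resume copying into this SRB.
	 */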
	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected the error.
 */
  2542. static inline int
  2543. qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
  2544. {
  2545. struct scsi_qla_host *vha = sp->vha;
  2546. struct scsi_cmnd *cmd = GET_CMD_SP(sp);
  2547. uint8_t *ap = &sts24->data[12];
  2548. uint8_t *ep = &sts24->data[20];
  2549. uint32_t e_ref_tag, a_ref_tag;
  2550. uint16_t e_app_tag, a_app_tag;
  2551. uint16_t e_guard, a_guard;
  2552. /*
  2553. * swab32 of the "data" field in the beginning of qla2x00_status_entry()
  2554. * would make guard field appear at offset 2
  2555. */
  2556. a_guard = get_unaligned_le16(ap + 2);
  2557. a_app_tag = get_unaligned_le16(ap + 0);
  2558. a_ref_tag = get_unaligned_le32(ap + 4);
  2559. e_guard = get_unaligned_le16(ep + 2);
  2560. e_app_tag = get_unaligned_le16(ep + 0);
  2561. e_ref_tag = get_unaligned_le32(ep + 4);
  2562. ql_dbg(ql_dbg_io, vha, 0x3023,
  2563. "iocb(s) %p Returned STATUS.\n", sts24);
  2564. ql_dbg(ql_dbg_io, vha, 0x3024,
  2565. "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
  2566. " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
  2567. " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
  2568. cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
  2569. a_app_tag, e_app_tag, a_guard, e_guard);
  2570. /*
  2571. * Ignore sector if:
  2572. * For type 3: ref & app tag is all 'f's
  2573. * For type 0,1,2: app tag is all 'f's
  2574. */
  2575. if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
  2576. (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
  2577. a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
  2578. uint32_t blocks_done, resid;
  2579. sector_t lba_s = scsi_get_lba(cmd);
  2580. /* 2TB boundary case covered automatically with this */
  2581. blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
  2582. resid = scsi_bufflen(cmd) - (blocks_done *
  2583. cmd->device->sector_size);
  2584. scsi_set_resid(cmd, resid);
  2585. cmd->result = DID_OK << 16;
  2586. /* Update protection tag */
  2587. if (scsi_prot_sg_count(cmd)) {
  2588. uint32_t i, j = 0, k = 0, num_ent;
  2589. struct scatterlist *sg;
  2590. struct t10_pi_tuple *spt;
  2591. /* Patch the corresponding protection tags */
  2592. scsi_for_each_prot_sg(cmd, sg,
  2593. scsi_prot_sg_count(cmd), i) {
  2594. num_ent = sg_dma_len(sg) / 8;
  2595. if (k + num_ent < blocks_done) {
  2596. k += num_ent;
  2597. continue;
  2598. }
  2599. j = blocks_done - k - 1;
  2600. k = blocks_done;
  2601. break;
  2602. }
  2603. if (k != blocks_done) {
  2604. ql_log(ql_log_warn, vha, 0x302f,
  2605. "unexpected tag values tag:lba=%x:%llx)\n",
  2606. e_ref_tag, (unsigned long long)lba_s);
  2607. return 1;
  2608. }
  2609. spt = page_address(sg_page(sg)) + sg->offset;
  2610. spt += j;
  2611. spt->app_tag = T10_PI_APP_ESCAPE;
  2612. if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
  2613. spt->ref_tag = T10_PI_REF_ESCAPE;
  2614. }
  2615. return 0;
  2616. }
  2617. /* check guard */
  2618. if (e_guard != a_guard) {
  2619. scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
  2620. set_host_byte(cmd, DID_ABORT);
  2621. return 1;
  2622. }
  2623. /* check ref tag */
  2624. if (e_ref_tag != a_ref_tag) {
  2625. scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
  2626. set_host_byte(cmd, DID_ABORT);
  2627. return 1;
  2628. }
  2629. /* check appl tag */
  2630. if (e_app_tag != a_app_tag) {
  2631. scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
  2632. set_host_byte(cmd, DID_ABORT);
  2633. return 1;
  2634. }
  2635. return 1;
  2636. }
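
/**
 * qla25xx_process_bidir_status_iocb() - Process a bidirectional command
 *	status IOCB and complete the associated bsg_job.
 * @vha: SCSI driver HA context
 * @pkt: Entry pointer
 * @req: request queue the command was issued on
 * @index: completion handle into req->outstanding_cmds
 */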
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct bsg_job *bsg_job = NULL;
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
	sts_entry_t *sts = pkt;
	struct sts_entry_24xx *sts24 = pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Free outstanding command slot. */
	req->outstanding_cmds[index] = NULL;

	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;

	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
	sp->done(sp, DID_OK << 16);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts = pkt;
	struct sts_entry_24xx *sts24 = pkt;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t sts_qual = 0;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Check for invalid queue pointer */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds) {
		sp = req->outstanding_cmds[handle];
		if (!sp) {
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);

		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return;
	}
	qla_put_fw_resources(sp->qpair, &sp->iores);

	if (sp->cmd_type != TYPE_SRB) {
		req->outstanding_cmds[handle] = NULL;
		ql_dbg(ql_dbg_io, vha, 0x3015,
		    "Unknown sp->cmd_type %x %p).\n",
		    sp->cmd_type, sp);
		return;
	}

	/* NVME completion. */
	if (sp->type == SRB_NVME_CMD) {
		req->outstanding_cmds[handle] = NULL;
		qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
		return;
	}

	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Task Management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

	/* Fast path completion. */
	qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
	sp->qpair->cmd_completion_cnt++;

	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);
		return;
	}

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);
		req->outstanding_cmds[handle] = NULL;
		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
		sts_qual = le16_to_cpu(sts24->status_qualifier);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
	if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
		     lscsi_status == SAM_STAT_BUSY))
		qla2x00_set_retry_delay_timestamp(fcport, sts_qual);

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_log(ql_log_warn, fcport->vha, 0x301d,
				    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * A SCSI status of TASK SET FULL or BUSY means the
			 * task was not completed; any other status here
			 * indicates dropped frame(s).
			 */
			ql_log(ql_log_warn, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
			    resid, scsi_bufflen(cp));
			vha->interface_err_cnt++;

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non-zero. If so, report
		 * SCSI Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
	case CS_EDIF_INV_REQ:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "Port to be marked lost on fcport=%02x%02x%02x, current "
			    "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    port_state_str[FCS_ONLINE],
			    comp_status);

			qlt_schedule_sess_for_deletion(fcport);
		}

		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;
		vha->hw_err_cnt++;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	case CS_DMA:
		ql_log(ql_log_info, fcport->vha, 0x3022,
		    "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b24,
		    ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);
		ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
		    pkt, sizeof(*sts24));
		res = DID_ERROR << 16;
		vha->hw_err_cnt++;
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	if (rsp->status_srb == NULL)
		sp->done(sp, res);

	/* For I/Os, clearing outstanding_cmds[handle] means scsi_done was called */
	req->outstanding_cmds[handle] = NULL;
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 * Return: 1 = allow further error analysis, 0 = no additional error analysis.
 */
static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;
	u16 index;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
		return 0;

	switch (pkt->entry_type) {
	case NOTIFY_ACK_TYPE:
	case STATUS_CONT_TYPE:
	case LOGINOUT_PORT_IOCB_TYPE:
	case CT_IOCB_TYPE:
	case ELS_IOCB_TYPE:
	case ABORT_IOCB_TYPE:
	case MBX_IOCB_TYPE:
	default:
		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
		if (sp) {
			sp->done(sp, res);
			return 0;
		}
		break;

	case SA_UPDATE_IOCB_TYPE:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case CTIO_CRC2:
		return 1;
	case STATUS_TYPE:
		sp = qla_get_sp_from_handle(vha, func, req, pkt, &index);
		if (sp) {
			sp->done(sp, res);
			req->outstanding_cmds[index] = NULL;
			return 0;
		}
		break;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
	return 0;
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = &reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
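
/**
 * qla24xx_abort_iocb_entry() - Process an Abort IOCB completion.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @pkt: Entry pointer
 */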
static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	srb_t *orig_sp = NULL;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = pkt->comp_status;
	orig_sp = sp->cmd_sp;
	/* Need to pass original sp */
	if (orig_sp)
		qla_nvme_abort_process_comp_status(pkt, orig_sp);

	sp->done(sp, 0);
}
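
/**
 * qla24xx_nvme_ls4_iocb() - Process an NVMe LS4 pass-through completion.
 * @vha: SCSI driver HA context
 * @pkt: Entry pointer
 * @req: request queue
 */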
void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
	struct pt_ls4_request *pkt, struct req_que *req)
{
	srb_t *sp;
	const char func[] = "LS4_IOCB";
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	comp_status = le16_to_cpu(pkt->status);
	sp->done(sp, comp_status);
}

/**
 * qla_chk_cont_iocb_avail - check that all continuation IOCBs are available
 *	before IOCB processing can start.
 * @vha: host adapter pointer
 * @rsp: response queue
 * @pkt: head IOCB describing how many continuation IOCBs follow
 * @rsp_q_in: response queue in pointer
 * Return: 0 = all IOCBs have arrived, -EIO = not all IOCBs have arrived yet.
 */
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
{
	int start_pkt_ring_index;
	u32 iocb_cnt = 0;
	int rc = 0;

	if (pkt->entry_count == 1)
		return rc;

	/* ring_index was pre-incremented; set it back to the current pkt */
	if (rsp->ring_index == 0)
		start_pkt_ring_index = rsp->length - 1;
	else
		start_pkt_ring_index = rsp->ring_index - 1;

	if (rsp_q_in < start_pkt_ring_index)
		/* q in ptr is wrapped */
		iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
	else
		iocb_cnt = rsp_q_in - start_pkt_ring_index;

	if (iocb_cnt < pkt->entry_count)
		rc = -EIO;

	ql_dbg(ql_dbg_init, vha, 0x5091,
	    "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
	    __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);

	return rc;
}
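
/**
 * qla_marker_iocb_entry() - Process a Marker IOCB completion.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @pkt: Entry pointer
 */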
static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct mrk_entry_24xx *pkt)
{
	const char func[] = "MRK-IOCB";
	srb_t *sp;
	int res = QLA_SUCCESS;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->entry_status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
		res = QLA_COMMAND_ERROR;
	}
	sp->u.iocb_cmd.u.tmf.data = res;
	sp->done(sp, res);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct purex_entry_24xx *purex_entry;
	struct purex_item *pure_item;
	u16 rsp_in = 0, cur_ring_index;
	int is_shadow_hba;

	if (!ha->flags.fw_started)
		return;

	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
		rsp->qpair->rcv_intr = 1;

		if (!rsp->qpair->cpu_mapped)
			qla_cpu_update(rsp->qpair, raw_smp_processor_id());
	}

#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in)			\
	do {								\
		_rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr :		\
				rd_reg_dword_relaxed((_rsp)->rsp_q_in);	\
	} while (0)

	is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);

	__update_rsp_in(is_shadow_hba, rsp, rsp_in);

	while (rsp->ring_index != rsp_in &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
		cur_ring_index = rsp->ring_index;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (qla_ini_mode_enabled(vha)) {
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
							 qla24xx_process_abts);
				break;
			}
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
				break;
			} else {
				qlt_24xx_process_atio_queue(vha, 1);
			}
			fallthrough;
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
			break;
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
		case NOTIFY_ACK_TYPE:
			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
					(struct nack_to_isp *)pkt);
			break;
		case MARKER_TYPE:
			qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt);
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
			break;
		case VP_CTRL_IOCB_TYPE:
			qla_ctrlvp_completed(vha, rsp->req,
			    (struct vp_ctrl_entry_24xx *)pkt);
			break;
		case PUREX_IOCB_TYPE:
			purex_entry = (void *)pkt;
			switch (purex_entry->els_frame_payload[3]) {
			case ELS_RDP:
				pure_item = qla24xx_copy_std_pkt(vha, pkt);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
						 qla24xx_process_purex_rdp);
				break;
			case ELS_FPIN:
				if (!vha->hw->flags.scm_enabled) {
					ql_log(ql_log_warn, vha, 0x5094,
					    "SCM not active for this port\n");
					break;
				}
				pure_item = qla27xx_copy_fpin_pkt(vha,
						  (void **)&pkt, &rsp);
				__update_rsp_in(is_shadow_hba, rsp, rsp_in);
				if (!pure_item)
					break;
				qla24xx_queue_purex_item(vha, pure_item,
						 qla27xx_process_purex_fpin);
				break;

			case ELS_AUTH_ELS:
				if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
					/*
					 * ring_ptr and ring_index were
					 * pre-incremented above. Reset them
					 * back to current. Wait for next
					 * interrupt with all IOCBs to arrive
					 * and re-process.
					 */
					rsp->ring_ptr = (response_t *)pkt;
					rsp->ring_index = cur_ring_index;

					ql_dbg(ql_dbg_init, vha, 0x5091,
					    "Defer processing ELS opcode %#x...\n",
					    purex_entry->els_frame_payload[3]);
					return;
				}
				qla24xx_auth_els(vha, (void **)&pkt, &rsp);
				break;
			default:
				ql_log(ql_log_warn, vha, 0x509c,
				    "Discarding ELS Request opcode 0x%x\n",
				    purex_entry->els_frame_payload[3]);
			}
			break;
		case SA_UPDATE_IOCB_TYPE:
			qla28xx_sa_update_iocb_entry(vha, rsp->req,
				(struct sa_update_28xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type 0x%x entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;

		wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
	}
}
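
/**
 * qla2xxx_check_risc_status() - Poll the RISC IO-base window registers on
 *	supported ISPs (25xx/81xx/83xx/27xx/28xx) and log additional status
 *	when the corresponding bit is set.
 * @vha: SCSI driver HA context
 */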
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_window, 0x0003);
	for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	wrt_reg_dword(&reg->iobase_window, 0x0000);
	rd_reg_dword(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}
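
/**
 * qla24xx_msix_rsp_q() - MSI-X response queue interrupt handler.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 */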
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
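
/**
 * qla24xx_msix_default() - MSI-X default vector handler; services mailbox
 *	completions, async events, and response/ATIO queue updates.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 */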
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);
			vha->hw_err_cnt++;

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}
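
/**
 * qla2xxx_msix_rsp_q() - MSI-X handler for a queue pair; defers response
 *	processing to the qpair's work item.
 * @irq: interrupt number
 * @dev_id: queue pair pointer
 */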
irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}
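
/**
 * qla2xxx_msix_rsp_q_hs() - MSI-X queue pair handler with interrupt
 *	handshake: clears the RISC interrupt before queueing work.
 * @irq: interrupt number
 * @dev_id: queue pair pointer
 */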
irqreturn_t
qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	reg = &ha->iobase->isp24;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
	{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
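
/**
 * qla24xx_enable_msix() - Allocate MSI-X vectors and register handlers for
 *	the base queues, plus the ATIO queue when target mode is enabled.
 * @ha: HA context
 * @rsp: base response queue
 */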
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_info, vha, 0x00c6,
		    "MSI-X: Using %d vectors\n", ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
		}
	}
	vha->irq_offset = desc.pre_vectors;
	ha->msix_entries = kcalloc(ha->msix_count,
				   sizeof(struct qla_msix_entry),
				   GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->vector_base0 = i;
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
			msix_entries[QLA_ATIO_VECTOR].handler,
			0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_MQUE_CAPABLE(ha) &&
	    (ha->msixbase && ha->mqiobase && ha->max_qpairs))
		ha->mqenable = 1;
	else
		ha->mqenable = 0;

	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
}
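
/**
 * qla2x00_request_irqs() - Enable interrupts for the adapter, preferring
 *	MSI-X, then MSI, then INTa.
 * @ha: HA context
 * @rsp: base response queue
 */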
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- ret=%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- ret=%d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
		/* Set max_qpairs to 0, as neither MSI-X nor MSI is enabled */
		ha->max_qpairs = 0;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	wrt_reg_word(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
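
/**
 * qla2x00_free_irqs() - Release all registered interrupt vectors.
 * @vha: SCSI driver HA context
 */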
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}
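
/**
 * qla25xx_request_irq() - Register the MSI-X handler for a queue pair.
 * @ha: HA context
 * @qpair: queue pair
 * @msix: MSI-X entry to wire up
 * @vector_type: index into msix_entries[] selecting the handler
 */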
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	qla_mapq_init_qp_cpu_map(ha, msix, qpair);
	return ret;
}