lpfc_nportdisc.c 92 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139
  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
  5. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  6. * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  10. * *
  11. * This program is free software; you can redistribute it and/or *
  12. * modify it under the terms of version 2 of the GNU General *
  13. * Public License as published by the Free Software Foundation. *
  14. * This program is distributed in the hope that it will be useful. *
  15. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  16. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  17. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  18. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  20. * more details, a copy of which can be found in the file COPYING *
  21. * included with this package. *
  22. *******************************************************************/
  23. #include <linux/blkdev.h>
  24. #include <linux/pci.h>
  25. #include <linux/slab.h>
  26. #include <linux/interrupt.h>
  27. #include <scsi/scsi.h>
  28. #include <scsi/scsi_device.h>
  29. #include <scsi/scsi_host.h>
  30. #include <scsi/scsi_transport_fc.h>
  31. #include <scsi/fc/fc_fs.h>
  32. #include "lpfc_hw4.h"
  33. #include "lpfc_hw.h"
  34. #include "lpfc_sli.h"
  35. #include "lpfc_sli4.h"
  36. #include "lpfc_nl.h"
  37. #include "lpfc_disc.h"
  38. #include "lpfc.h"
  39. #include "lpfc_scsi.h"
  40. #include "lpfc_nvme.h"
  41. #include "lpfc_logmsg.h"
  42. #include "lpfc_crtn.h"
  43. #include "lpfc_vport.h"
  44. #include "lpfc_debugfs.h"
  45. /* Called to verify a rcv'ed ADISC was intended for us. */
  46. static int
  47. lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  48. struct lpfc_name *nn, struct lpfc_name *pn)
  49. {
  50. /* First, we MUST have a RPI registered */
  51. if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
  52. return 0;
  53. /* Compare the ADISC rsp WWNN / WWPN matches our internal node
  54. * table entry for that node.
  55. */
  56. if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
  57. return 0;
  58. if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
  59. return 0;
  60. /* we match, return success */
  61. return 1;
  62. }
/* lpfc_check_sparm - Validate and normalize received service parameters.
 * @vport: virtual port receiving the parameters.
 * @ndlp:  node the parameters came from; its WWNN/WWPN are updated on success.
 * @sp:    received service parameter payload (modified in place to clamp
 *         receive data field sizes to our own advertised values).
 * @class: class of service that must be valid (CLASS1/CLASS2/CLASS3).
 * @flogi: non-zero when validating FLOGI parameters, in which case the
 *         per-class receive-size checks are skipped.
 *
 * Returns 1 when the parameters are acceptable (and caches the remote
 * node/port names in @ndlp); returns 0 and logs an error otherwise.
 */
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm *sp, uint32_t class, int flogi)
{
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
	 * correcting the byte values.
	 */
	if (sp->cls1.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
				     hsp->cls1.rcvDataSizeLsb);
			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
				     sp->cls1.rcvDataSizeLsb);
			/* A zero receive size is illegal per FC-LS. */
			if (!ssp_value)
				goto bad_service_param;
			/* Clamp the remote's size down to ours if larger. */
			if (ssp_value > hsp_value) {
				sp->cls1.rcvDataSizeLsb =
					hsp->cls1.rcvDataSizeLsb;
				sp->cls1.rcvDataSizeMsb =
					hsp->cls1.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS1)
		/* Required class of service is not supported by the remote. */
		goto bad_service_param;
	if (sp->cls2.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
				     hsp->cls2.rcvDataSizeLsb);
			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
				     sp->cls2.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls2.rcvDataSizeLsb =
					hsp->cls2.rcvDataSizeLsb;
				sp->cls2.rcvDataSizeMsb =
					hsp->cls2.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS2)
		goto bad_service_param;
	if (sp->cls3.classValid) {
		if (!flogi) {
			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
				     hsp->cls3.rcvDataSizeLsb);
			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
				     sp->cls3.rcvDataSizeLsb);
			if (!ssp_value)
				goto bad_service_param;
			if (ssp_value > hsp_value) {
				sp->cls3.rcvDataSizeLsb =
					hsp->cls3.rcvDataSizeLsb;
				sp->cls3.rcvDataSizeMsb =
					hsp->cls3.rcvDataSizeMsb;
			}
		}
	} else if (class == CLASS3)
		goto bad_service_param;

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	/* Parameters accepted - cache the remote names in the node entry. */
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters.  Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}
/* lpfc_check_elscmpl_iocb - Locate the ELS response payload for a completion.
 * @phba:     pointer to lpfc hba data structure.
 * @cmdiocb:  the original command IOCB.
 * @rspiocb:  the response IOCB for that command.
 *
 * Returns a pointer to the ELS payload (just past the leading command word)
 * when the command's DMA buffers are still present.  If the command buffer
 * has been cleared (see lpfc_els_abort), returns NULL and, when the response
 * did not already carry an error, forces a LOCAL_REJECT/SLI_ABORTED status
 * into the response so callers treat the exchange as failed.
 */
static void *
lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	void *ptr = NULL;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	pcmd = cmdiocb->cmd_dmabuf;

	/* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay
	 * freeing associated memory till after ABTS completes.
	 */
	if (pcmd) {
		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
				      list);
		if (prsp) {
			lp = (uint32_t *) prsp->virt;
			/* Skip the leading ELS command word to reach the
			 * payload proper.
			 */
			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
		}
	} else {
		/* Force ulp_status error since we are returning NULL ptr */
		if (!(ulp_status)) {
			if (phba->sli_rev == LPFC_SLI_REV4) {
				/* SLI4 status lives in the WCQE completion. */
				bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl,
				       IOSTAT_LOCAL_REJECT);
				rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
			} else {
				/* SLI3 status lives in the IOCB itself. */
				rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
				rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
			}
		}
		ptr = NULL;
	}
	return ptr;
}
/*
 * lpfc_els_abort - Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry.  This routine effectively
 * results in a "software abort": every fabric IO, in-flight ELS IO
 * (txcmplq), and queued ELS IO (txq) belonging to @ndlp is aborted or
 * cancelled, and any pending retry-delay timer is stopped.
 */
void
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);

	/* In case of error recovery path, we might have a NULL pring here */
	if (unlikely(!pring))
		return;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "2819 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	/* Clean up all fabric IOs first.*/
	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
	 * txcmplq so that the abort operation completes them successfully.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
	/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			list_add_tail(&iocb->dlist, &abort_list);
	}
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&iocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
		spin_unlock_irq(&phba->hbalock);
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	INIT_LIST_HEAD(&abort_list);

	/* Now process the txq */
	spin_lock_irq(&phba->hbalock);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* Not yet issued to HW, so it can simply be moved
			 * off the txq and completed with an error below.
			 */
			list_del_init(&iocb->list);
			list_add_tail(&iocb->list, &abort_list);
		}
	}
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &abort_list,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
 * @phba: pointer to lpfc hba data structure.
 * @login_mbox: pointer to REG_RPI mailbox object
 *
 * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes.
 * This is the completion handler for that REG_RPI mailbox: on mailbox
 * success it sends the deferred PLOGI ACC (using the saved command IOCB),
 * then always runs the standard REG_RPI completion, clears the deferred-ACC
 * flag, and frees the saved IOCB copy.
 */
static void
lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
{
	struct lpfc_iocbq *save_iocb;
	struct lpfc_nodelist *ndlp;
	MAILBOX_t *mb = &login_mbox->u.mb;

	int rc;

	ndlp = login_mbox->ctx_ndlp;
	/* Copy of the original PLOGI IOCB, saved when the PLOGI was rcv'ed. */
	save_iocb = login_mbox->context3;

	if (mb->mbxStatus == MBX_SUCCESS) {
		/* Now that REG_RPI completed successfully,
		 * we can now proceed with sending the PLOGI ACC.
		 */
		rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
				      save_iocb, ndlp, NULL);
		if (rc) {
			/* ACC failure is logged but not fatal here; the
			 * REG_RPI completion below still runs.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"4576 PLOGI ACC fails pt2pt discovery: "
					"DID %x Data: %x\n", ndlp->nlp_DID, rc);
		}
	}

	/* Now process the REG_RPI cmpl */
	lpfc_mbx_cmpl_reg_login(phba, login_mbox);
	ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
	kfree(save_iocb);
}
  289. static int
  290. lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  291. struct lpfc_iocbq *cmdiocb)
  292. {
  293. struct lpfc_hba *phba = vport->phba;
  294. struct lpfc_dmabuf *pcmd;
  295. uint64_t nlp_portwwn = 0;
  296. uint32_t *lp;
  297. union lpfc_wqe128 *wqe;
  298. IOCB_t *icmd;
  299. struct serv_parm *sp;
  300. uint32_t ed_tov;
  301. LPFC_MBOXQ_t *link_mbox;
  302. LPFC_MBOXQ_t *login_mbox;
  303. struct lpfc_iocbq *save_iocb;
  304. struct ls_rjt stat;
  305. uint32_t vid, flag;
  306. int rc;
  307. u32 remote_did;
  308. memset(&stat, 0, sizeof (struct ls_rjt));
  309. pcmd = cmdiocb->cmd_dmabuf;
  310. lp = (uint32_t *) pcmd->virt;
  311. sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
  312. if (wwn_to_u64(sp->portName.u.wwn) == 0) {
  313. lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  314. "0140 PLOGI Reject: invalid pname\n");
  315. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  316. stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
  317. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
  318. NULL);
  319. return 0;
  320. }
  321. if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
  322. lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  323. "0141 PLOGI Reject: invalid nname\n");
  324. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  325. stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
  326. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
  327. NULL);
  328. return 0;
  329. }
  330. nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
  331. if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
  332. /* Reject this request because invalid parameters */
  333. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  334. stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
  335. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
  336. NULL);
  337. return 0;
  338. }
  339. if (phba->sli_rev == LPFC_SLI_REV4)
  340. wqe = &cmdiocb->wqe;
  341. else
  342. icmd = &cmdiocb->iocb;
  343. /* PLOGI chkparm OK */
  344. lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
  345. "0114 PLOGI chkparm OK Data: x%x x%x x%x "
  346. "x%x x%x x%x\n",
  347. ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
  348. ndlp->nlp_rpi, vport->port_state,
  349. vport->fc_flag);
  350. if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
  351. ndlp->nlp_fcp_info |= CLASS2;
  352. else
  353. ndlp->nlp_fcp_info |= CLASS3;
  354. ndlp->nlp_class_sup = 0;
  355. if (sp->cls1.classValid)
  356. ndlp->nlp_class_sup |= FC_COS_CLASS1;
  357. if (sp->cls2.classValid)
  358. ndlp->nlp_class_sup |= FC_COS_CLASS2;
  359. if (sp->cls3.classValid)
  360. ndlp->nlp_class_sup |= FC_COS_CLASS3;
  361. if (sp->cls4.classValid)
  362. ndlp->nlp_class_sup |= FC_COS_CLASS4;
  363. ndlp->nlp_maxframe =
  364. ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
  365. /* if already logged in, do implicit logout */
  366. switch (ndlp->nlp_state) {
  367. case NLP_STE_NPR_NODE:
  368. if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
  369. break;
  370. fallthrough;
  371. case NLP_STE_REG_LOGIN_ISSUE:
  372. case NLP_STE_PRLI_ISSUE:
  373. case NLP_STE_UNMAPPED_NODE:
  374. case NLP_STE_MAPPED_NODE:
  375. /* For initiators, lpfc_plogi_confirm_nport skips fabric did.
  376. * For target mode, execute implicit logo.
  377. * Fabric nodes go into NPR.
  378. */
  379. if (!(ndlp->nlp_type & NLP_FABRIC) &&
  380. !(phba->nvmet_support)) {
  381. /* Clear ndlp info, since follow up PRLI may have
  382. * updated ndlp information
  383. */
  384. ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
  385. ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
  386. ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
  387. ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
  388. ndlp->nlp_flag &= ~NLP_FIRSTBURST;
  389. lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
  390. ndlp, NULL);
  391. return 1;
  392. }
  393. if (nlp_portwwn != 0 &&
  394. nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
  395. lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  396. "0143 PLOGI recv'd from DID: x%x "
  397. "WWPN changed: old %llx new %llx\n",
  398. ndlp->nlp_DID,
  399. (unsigned long long)nlp_portwwn,
  400. (unsigned long long)
  401. wwn_to_u64(sp->portName.u.wwn));
  402. /* Notify transport of connectivity loss to trigger cleanup. */
  403. if (phba->nvmet_support &&
  404. ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
  405. lpfc_nvmet_invalidate_host(phba, ndlp);
  406. ndlp->nlp_prev_state = ndlp->nlp_state;
  407. /* rport needs to be unregistered first */
  408. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  409. break;
  410. }
  411. ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
  412. ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
  413. ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
  414. ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
  415. ndlp->nlp_flag &= ~NLP_FIRSTBURST;
  416. login_mbox = NULL;
  417. link_mbox = NULL;
  418. save_iocb = NULL;
  419. /* Check for Nport to NPort pt2pt protocol */
  420. if ((vport->fc_flag & FC_PT2PT) &&
  421. !(vport->fc_flag & FC_PT2PT_PLOGI)) {
  422. /* rcv'ed PLOGI decides what our NPortId will be */
  423. if (phba->sli_rev == LPFC_SLI_REV4) {
  424. vport->fc_myDID = bf_get(els_rsp64_sid,
  425. &cmdiocb->wqe.xmit_els_rsp);
  426. } else {
  427. vport->fc_myDID = icmd->un.rcvels.parmRo;
  428. }
  429. /* If there is an outstanding FLOGI, abort it now.
  430. * The remote NPort is not going to ACC our FLOGI
  431. * if its already issuing a PLOGI for pt2pt mode.
  432. * This indicates our FLOGI was dropped; however, we
  433. * must have ACCed the remote NPorts FLOGI to us
  434. * to make it here.
  435. */
  436. if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
  437. lpfc_els_abort_flogi(phba);
  438. ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
  439. if (sp->cmn.edtovResolution) {
  440. /* E_D_TOV ticks are in nanoseconds */
  441. ed_tov = (phba->fc_edtov + 999999) / 1000000;
  442. }
  443. /*
  444. * For pt-to-pt, use the larger EDTOV
  445. * RATOV = 2 * EDTOV
  446. */
  447. if (ed_tov > phba->fc_edtov)
  448. phba->fc_edtov = ed_tov;
  449. phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
  450. memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
  451. /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
  452. * to account for updated TOV's / parameters
  453. */
  454. if (phba->sli_rev == LPFC_SLI_REV4)
  455. lpfc_issue_reg_vfi(vport);
  456. else {
  457. link_mbox = mempool_alloc(phba->mbox_mem_pool,
  458. GFP_KERNEL);
  459. if (!link_mbox)
  460. goto out;
  461. lpfc_config_link(phba, link_mbox);
  462. link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  463. link_mbox->vport = vport;
  464. /* The default completion handling for CONFIG_LINK
  465. * does not require the ndlp so no reference is needed.
  466. */
  467. link_mbox->ctx_ndlp = ndlp;
  468. rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
  469. if (rc == MBX_NOT_FINISHED) {
  470. mempool_free(link_mbox, phba->mbox_mem_pool);
  471. goto out;
  472. }
  473. }
  474. lpfc_can_disctmo(vport);
  475. }
  476. ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
  477. if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
  478. sp->cmn.valid_vendor_ver_level) {
  479. vid = be32_to_cpu(sp->un.vv.vid);
  480. flag = be32_to_cpu(sp->un.vv.flags);
  481. if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
  482. ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
  483. }
  484. login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  485. if (!login_mbox)
  486. goto out;
  487. save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
  488. if (!save_iocb)
  489. goto out;
  490. /* Save info from cmd IOCB to be used in rsp after all mbox completes */
  491. memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
  492. sizeof(struct lpfc_iocbq));
  493. /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
  494. if (phba->sli_rev == LPFC_SLI_REV4)
  495. lpfc_unreg_rpi(vport, ndlp);
  496. /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
  497. * always be deferring the ACC.
  498. */
  499. if (phba->sli_rev == LPFC_SLI_REV4)
  500. remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
  501. else
  502. remote_did = icmd->un.rcvels.remoteID;
  503. rc = lpfc_reg_rpi(phba, vport->vpi, remote_did,
  504. (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
  505. if (rc)
  506. goto out;
  507. login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
  508. login_mbox->vport = vport;
  509. /*
  510. * If there is an outstanding PLOGI issued, abort it before
  511. * sending ACC rsp for received PLOGI. If pending plogi
  512. * is not canceled here, the plogi will be rejected by
  513. * remote port and will be retried. On a configuration with
  514. * single discovery thread, this will cause a huge delay in
  515. * discovery. Also this will cause multiple state machines
  516. * running in parallel for this node.
  517. * This only applies to a fabric environment.
  518. */
  519. if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
  520. (vport->fc_flag & FC_FABRIC)) {
  521. /* software abort outstanding PLOGI */
  522. lpfc_els_abort(phba, ndlp);
  523. }
  524. if ((vport->port_type == LPFC_NPIV_PORT &&
  525. vport->cfg_restrict_login)) {
  526. /* no deferred ACC */
  527. kfree(save_iocb);
  528. /* This is an NPIV SLI4 instance that does not need to register
  529. * a default RPI.
  530. */
  531. if (phba->sli_rev == LPFC_SLI_REV4) {
  532. lpfc_mbox_rsrc_cleanup(phba, login_mbox,
  533. MBOX_THD_UNLOCKED);
  534. login_mbox = NULL;
  535. } else {
  536. /* In order to preserve RPIs, we want to cleanup
  537. * the default RPI the firmware created to rcv
  538. * this ELS request. The only way to do this is
  539. * to register, then unregister the RPI.
  540. */
  541. spin_lock_irq(&ndlp->lock);
  542. ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
  543. NLP_RCV_PLOGI);
  544. spin_unlock_irq(&ndlp->lock);
  545. }
  546. stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
  547. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  548. rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
  549. ndlp, login_mbox);
  550. if (rc && login_mbox)
  551. lpfc_mbox_rsrc_cleanup(phba, login_mbox,
  552. MBOX_THD_UNLOCKED);
  553. return 1;
  554. }
  555. /* So the order here should be:
  556. * SLI3 pt2pt
  557. * Issue CONFIG_LINK mbox
  558. * CONFIG_LINK cmpl
  559. * SLI4 pt2pt
  560. * Issue REG_VFI mbox
  561. * REG_VFI cmpl
  562. * SLI4
  563. * Issue UNREG RPI mbx
  564. * UNREG RPI cmpl
  565. * Issue REG_RPI mbox
  566. * REG RPI cmpl
  567. * Issue PLOGI ACC
  568. * PLOGI ACC cmpl
  569. */
  570. login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
  571. login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
  572. if (!login_mbox->ctx_ndlp)
  573. goto out;
  574. login_mbox->context3 = save_iocb; /* For PLOGI ACC */
  575. spin_lock_irq(&ndlp->lock);
  576. ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
  577. spin_unlock_irq(&ndlp->lock);
  578. /* Start the ball rolling by issuing REG_LOGIN here */
  579. rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
  580. if (rc == MBX_NOT_FINISHED) {
  581. lpfc_nlp_put(ndlp);
  582. goto out;
  583. }
  584. lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
  585. return 1;
  586. out:
  587. kfree(save_iocb);
  588. if (login_mbox)
  589. mempool_free(login_mbox, phba->mbox_mem_pool);
  590. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  591. stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
  592. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  593. return 0;
  594. }
  595. /**
  596. * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
  597. * @phba: pointer to lpfc hba data structure.
  598. * @mboxq: pointer to mailbox object
  599. *
  600. * This routine is invoked to issue a completion to a rcv'ed
  601. * ADISC or PDISC after the paused RPI has been resumed.
  602. **/
  603. static void
  604. lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
  605. {
  606. struct lpfc_vport *vport;
  607. struct lpfc_iocbq *elsiocb;
  608. struct lpfc_nodelist *ndlp;
  609. uint32_t cmd;
  610. elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
  611. ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
  612. vport = mboxq->vport;
  613. cmd = elsiocb->drvrTimeout;
  614. if (cmd == ELS_CMD_ADISC) {
  615. lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
  616. } else {
  617. lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
  618. ndlp, NULL);
  619. }
  620. /* This nlp_put pairs with lpfc_sli4_resume_rpi */
  621. lpfc_nlp_put(ndlp);
  622. kfree(elsiocb);
  623. mempool_free(mboxq, phba->mbox_mem_pool);
  624. }
/* Handle a received ADISC or PDISC for @ndlp.
 *
 * Validates the WWNN/WWPN in the payload against the node; on a match the
 * request is ACC'd (for SLI4 the paused RPI is resumed first and the ACC is
 * deferred to lpfc_mbx_cmpl_resume_rpi) and the node may be moved to MAPPED.
 * On a mismatch the request is LS_RJT'd and the node is parked in NPR with a
 * delayed PLOGI retry.
 *
 * Return: 1 if the ADISC/PDISC was authenticated and accepted, 0 if rejected.
 */
static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd;
	struct serv_parm *sp;
	struct lpfc_name *pnn, *ppn;
	struct ls_rjt stat;
	ADISC *ap;
	uint32_t *lp;
	uint32_t cmd;

	pcmd = cmdiocb->cmd_dmabuf;
	lp = (uint32_t *) pcmd->virt;
	cmd = *lp++;	/* first word is the ELS command code */

	/* ADISC and PDISC carry the names at different payload offsets */
	if (cmd == ELS_CMD_ADISC) {
		ap = (ADISC *) lp;
		pnn = (struct lpfc_name *) & ap->nodeName;
		ppn = (struct lpfc_name *) & ap->portName;
	} else {
		sp = (struct serv_parm *) lp;
		pnn = (struct lpfc_name *) & sp->nodeName;
		ppn = (struct lpfc_name *) & sp->portName;
	}

	if (get_job_ulpstatus(phba, cmdiocb) == 0 &&
	    lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
		/*
		 * As soon as we send ACC, the remote NPort can
		 * start sending us data. Thus, for SLI4 we must
		 * resume the RPI before the ACC goes out.
		 */
		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
					  GFP_KERNEL);
			if (elsiocb) {
				/* Save info from cmd IOCB used in rsp;
				 * freed in lpfc_mbx_cmpl_resume_rpi.
				 */
				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
				       sizeof(struct lpfc_iocbq));

				/* Save the ELS cmd so the completion knows
				 * which accept to send.
				 */
				elsiocb->drvrTimeout = cmd;

				lpfc_sli4_resume_rpi(ndlp,
					lpfc_mbx_cmpl_resume_rpi, elsiocb);
				goto out;
			}
		}
		/* SLI3 (or SLI4 allocation failure): ACC immediately */
		if (cmd == ELS_CMD_ADISC) {
			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
		} else {
			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
					 ndlp, NULL);
		}
out:
		/* If we are authenticated, move to the proper state.
		 * It is possible an ADISC arrived and the remote nport
		 * is already in MAPPED or UNMAPPED state. Catch this
		 * condition and don't set the nlp_state again because
		 * it causes an unnecessary transport unregister/register.
		 *
		 * Nodes marked for ADISC will move MAPPED or UNMAPPED state
		 * after issuing ADISC
		 */
		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
			if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
			    !(ndlp->nlp_flag & NLP_NPR_ADISC))
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_MAPPED_NODE);
		}
		return 1;
	}
	/* Reject this request because invalid parameters */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	/* 1 sec timeout before retrying PLOGI to the remote port */
	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));

	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(&ndlp->lock);
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return 0;
}
/* Handle a received LOGO or PRLO for @ndlp.
 *
 * ACCs the request (once — NLP_LOGO_ACC guards against LOGO storms), then:
 *  - for the fabric controller (Fabric_DID), tears down the link and either
 *    re-instantiates the VLink via FDISC or retries physical port discovery;
 *  - for non-fabric FCP/NVME targets or pt2pt peers, schedules a delayed
 *    PLOGI retry;
 * and finally unregisters the node from the backend and parks it in NPR.
 *
 * Return: always 0.
 */
static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(&ndlp->lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	/* This clause allows the initiator to ACC the LOGO back to the
	 * Fabric Domain Controller. It does deliberately skip all other
	 * steps because some fabrics send RDP requests after logging out
	 * from the initiator.
	 */
	if (ndlp->nlp_type & NLP_FABRIC &&
	    ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
		return 0;

	/* Notify transport of connectivity loss to trigger cleanup. */
	if (phba->nvmet_support &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
		lpfc_nvmet_invalidate_host(phba, ndlp);

	if (ndlp->nlp_DID == Fabric_DID) {
		/* Nothing to tear down before FDISC completes or in pt2pt */
		if (vport->port_state <= LPFC_FDISC ||
		    vport->fc_flag & FC_PT2PT)
			goto out;
		lpfc_linkdown_port(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
		spin_unlock_irq(shost->host_lock);
		/* Scan for any other vport still logged into the fabric */
		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_LOGO_RCVD)) &&
				    (vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			spin_lock_irq(shost->host_lock);
			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
			spin_unlock_irq(shost->host_lock);
			lpfc_retry_pport_discovery(phba);
		}
	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
		((ndlp->nlp_type & NLP_FCP_TARGET) ||
		 (ndlp->nlp_type & NLP_NVME_TARGET) ||
		 (vport->fc_flag & FC_PT2PT))) ||
		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node
		 * AND the remote NPORT is a FCP/NVME Target or we
		 * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
		 * case for LOGO as a response to ADISC behavior.
		 */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(&ndlp->lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
out:
	/* Unregister from backend, could have been skipped due to ADISC */
	lpfc_nlp_unreg_node(vport, ndlp);

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(&ndlp->lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO. The action will resume in
	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}
  819. static uint32_t
  820. lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
  821. struct lpfc_nodelist *ndlp,
  822. struct lpfc_iocbq *cmdiocb)
  823. {
  824. struct ls_rjt stat;
  825. uint32_t *payload;
  826. uint32_t cmd;
  827. payload = cmdiocb->cmd_dmabuf->virt;
  828. cmd = *payload;
  829. if (vport->phba->nvmet_support) {
  830. /* Must be a NVME PRLI */
  831. if (cmd == ELS_CMD_PRLI)
  832. goto out;
  833. } else {
  834. /* Initiator mode. */
  835. if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
  836. goto out;
  837. }
  838. return 1;
  839. out:
  840. lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
  841. "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
  842. "state x%x flags x%x\n",
  843. cmd, ndlp->nlp_rpi, ndlp->nlp_state,
  844. ndlp->nlp_flag);
  845. memset(&stat, 0, sizeof(struct ls_rjt));
  846. stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
  847. stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
  848. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
  849. ndlp, NULL);
  850. return 0;
  851. }
/* Parse a received PRLI (FCP or NVME type) and record the remote port's
 * declared roles and options on @ndlp, then refresh the SCSI transport
 * rport roles if an rport is currently attached.
 */
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *pcmd;
	uint32_t *lp;
	PRLI *npr;
	struct fc_rport *rport = ndlp->rport;
	u32 roles;

	pcmd = cmdiocb->cmd_dmabuf;
	lp = (uint32_t *)pcmd->virt;
	/* PRLI service parameter page follows the 32-bit ELS command word */
	npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));

	if ((npr->prliType == PRLI_FCP_TYPE) ||
	    (npr->prliType == PRLI_NVME_TYPE)) {
		/* Record initiator/target roles per FC4 type */
		if (npr->initiatorFunc) {
			if (npr->prliType == PRLI_FCP_TYPE)
				ndlp->nlp_type |= NLP_FCP_INITIATOR;
			if (npr->prliType == PRLI_NVME_TYPE)
				ndlp->nlp_type |= NLP_NVME_INITIATOR;
		}
		if (npr->targetFunc) {
			if (npr->prliType == PRLI_FCP_TYPE)
				ndlp->nlp_type |= NLP_FCP_TARGET;
			if (npr->prliType == PRLI_NVME_TYPE)
				ndlp->nlp_type |= NLP_NVME_TARGET;
			/* Peer supports first burst (no XFER_RDY needed) */
			if (npr->writeXferRdyDis)
				ndlp->nlp_flag |= NLP_FIRSTBURST;
		}
		/* Retry bit on an FCP node indicates an FCP-2 device */
		if (npr->Retry && ndlp->nlp_type &
					(NLP_FCP_INITIATOR | NLP_FCP_TARGET))
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

		/* NVME retry support requires local nsler capability too */
		if (npr->Retry && phba->nsler &&
		    ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;


		/* If this driver is in nvme target mode, set the ndlp's fc4
		 * type to NVME provided the PRLI response claims NVME FC4
		 * type. Target mode does not issue gft_id so doesn't get
		 * the fc4 type set until now.
		 */
		if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
			ndlp->nlp_fc4_type |= NLP_FC4_NVME;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		}

		/* Fabric Controllers send FCP PRLI as an initiator but should
		 * not get recognized as FCP type and registered with transport.
		 */
		if (npr->prliType == PRLI_FCP_TYPE &&
		    !(ndlp->nlp_type & NLP_FABRIC))
			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
	}
	if (rport) {
		/* We need to update the rport role values */
		roles = FC_RPORT_ROLE_UNKNOWN;
		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			"rport rolechg: role:x%x did:x%x flg:x%x",
			roles, ndlp->nlp_DID, ndlp->nlp_flag);

		/* Skip the transport role change when running NVME-only */
		if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
			fc_remote_port_rolechg(rport, roles);
	}
}
  917. static uint32_t
  918. lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  919. {
  920. if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
  921. spin_lock_irq(&ndlp->lock);
  922. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  923. spin_unlock_irq(&ndlp->lock);
  924. return 0;
  925. }
  926. if (!(vport->fc_flag & FC_PT2PT)) {
  927. /* Check config parameter use-adisc or FCP-2 */
  928. if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
  929. ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
  930. (ndlp->nlp_type & NLP_FCP_TARGET)))) {
  931. spin_lock_irq(&ndlp->lock);
  932. ndlp->nlp_flag |= NLP_NPR_ADISC;
  933. spin_unlock_irq(&ndlp->lock);
  934. return 1;
  935. }
  936. }
  937. spin_lock_irq(&ndlp->lock);
  938. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  939. spin_unlock_irq(&ndlp->lock);
  940. lpfc_unreg_rpi(vport, ndlp);
  941. return 0;
  942. }
/**
 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
 * @phba : Pointer to lpfc_hba structure.
 * @vport: Pointer to lpfc_vport structure.
 * @ndlp: Pointer to lpfc_nodelist structure.
 * @rpi : rpi to be release.
 *
 * This function will send a unreg_login mailbox command to the firmware
 * to release a rpi.
 **/
static void
lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
		 struct lpfc_nodelist *ndlp, uint16_t rpi)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	/* If there is already an UNREG in progress for this ndlp,
	 * no need to queue up another one.
	 */
	if (ndlp->nlp_flag & NLP_UNREG_INP) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "1435 release_rpi SKIP UNREG x%x on "
				 "NPort x%x deferred x%x flg x%x "
				 "Data: x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID,
				 ndlp->nlp_defer_did,
				 ndlp->nlp_flag, ndlp);
		return;
	}

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
			GFP_KERNEL);
	if (!pmb)
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2796 mailbox memory allocation failed \n");
	else {
		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		pmb->vport = vport;
		/* Hold a node reference for the mailbox completion path */
		pmb->ctx_ndlp = lpfc_nlp_get(ndlp);
		if (!pmb->ctx_ndlp) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return;
		}

		/* Flag the UNREG as in progress except for well-known DIDs
		 * or when the port is offline.
		 */
		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
			ndlp->nlp_flag |= NLP_UNREG_INP;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "1437 release_rpi UNREG x%x "
				 "on NPort x%x flg x%x\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			/* Issue failed: drop the reference taken above */
			lpfc_nlp_put(ndlp);
			mempool_free(pmb, phba->mbox_mem_pool);
		}
	}
}
  1000. static uint32_t
  1001. lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1002. void *arg, uint32_t evt)
  1003. {
  1004. struct lpfc_hba *phba;
  1005. LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
  1006. uint16_t rpi;
  1007. phba = vport->phba;
  1008. /* Release the RPI if reglogin completing */
  1009. if (!(phba->pport->load_flag & FC_UNLOADING) &&
  1010. (evt == NLP_EVT_CMPL_REG_LOGIN) &&
  1011. (!pmb->u.mb.mbxStatus)) {
  1012. rpi = pmb->u.mb.un.varWords[0];
  1013. lpfc_release_rpi(phba, vport, ndlp, rpi);
  1014. }
  1015. lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  1016. "0271 Illegal State Transition: node x%x "
  1017. "event x%x, state x%x Data: x%x x%x\n",
  1018. ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
  1019. ndlp->nlp_flag);
  1020. return ndlp->nlp_state;
  1021. }
  1022. static uint32_t
  1023. lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1024. void *arg, uint32_t evt)
  1025. {
  1026. /* This transition is only legal if we previously
  1027. * rcv'ed a PLOGI. Since we don't want 2 discovery threads
  1028. * working on the same NPortID, do nothing for this thread
  1029. * to stop it.
  1030. */
  1031. if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
  1032. lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
  1033. "0272 Illegal State Transition: node x%x "
  1034. "event x%x, state x%x Data: x%x x%x\n",
  1035. ndlp->nlp_DID, evt, ndlp->nlp_state,
  1036. ndlp->nlp_rpi, ndlp->nlp_flag);
  1037. }
  1038. return ndlp->nlp_state;
  1039. }
  1040. /* Start of Discovery State Machine routines */
  1041. static uint32_t
  1042. lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1043. void *arg, uint32_t evt)
  1044. {
  1045. struct lpfc_iocbq *cmdiocb;
  1046. cmdiocb = (struct lpfc_iocbq *) arg;
  1047. if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
  1048. return ndlp->nlp_state;
  1049. }
  1050. return NLP_STE_FREED_NODE;
  1051. }
  1052. static uint32_t
  1053. lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1054. void *arg, uint32_t evt)
  1055. {
  1056. lpfc_issue_els_logo(vport, ndlp, 0);
  1057. return ndlp->nlp_state;
  1058. }
  1059. static uint32_t
  1060. lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1061. void *arg, uint32_t evt)
  1062. {
  1063. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1064. spin_lock_irq(&ndlp->lock);
  1065. ndlp->nlp_flag |= NLP_LOGO_ACC;
  1066. spin_unlock_irq(&ndlp->lock);
  1067. lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
  1068. return ndlp->nlp_state;
  1069. }
  1070. static uint32_t
  1071. lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1072. void *arg, uint32_t evt)
  1073. {
  1074. return NLP_STE_FREED_NODE;
  1075. }
  1076. static uint32_t
  1077. lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1078. void *arg, uint32_t evt)
  1079. {
  1080. return NLP_STE_FREED_NODE;
  1081. }
  1082. static uint32_t
  1083. lpfc_device_recov_unused_node(struct lpfc_vport *vport,
  1084. struct lpfc_nodelist *ndlp,
  1085. void *arg, uint32_t evt)
  1086. {
  1087. return ndlp->nlp_state;
  1088. }
/* PLOGI_ISSUE state: a PLOGI arrived while ours is outstanding — a PLOGI
 * collision.  Per convention the port with the lower WWPN accepts the
 * other's PLOGI; the higher-WWPN side rejects with "command in progress".
 * When we accept and this node was still slated for discovery, the
 * discovery accounting is advanced here as well.
 */
static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
				    NULL);
	} else {
		/* Our WWPN is lower: accept theirs and, if this node was
		 * pending discovery, account for it being discovered now.
		 */
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(&ndlp->lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				/* Discovery is complete for this vport */
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}
  1135. static uint32_t
  1136. lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1137. void *arg, uint32_t evt)
  1138. {
  1139. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1140. struct ls_rjt stat;
  1141. memset(&stat, 0, sizeof (struct ls_rjt));
  1142. stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
  1143. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  1144. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  1145. return ndlp->nlp_state;
  1146. }
  1147. static uint32_t
  1148. lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1149. void *arg, uint32_t evt)
  1150. {
  1151. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1152. /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
  1153. if (vport->phba->sli_rev == LPFC_SLI_REV3)
  1154. ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;
  1155. /* software abort outstanding PLOGI */
  1156. lpfc_els_abort(vport->phba, ndlp);
  1157. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1158. return ndlp->nlp_state;
  1159. }
  1160. static uint32_t
  1161. lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1162. void *arg, uint32_t evt)
  1163. {
  1164. struct lpfc_hba *phba = vport->phba;
  1165. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1166. /* software abort outstanding PLOGI */
  1167. lpfc_els_abort(phba, ndlp);
  1168. if (evt == NLP_EVT_RCV_LOGO) {
  1169. lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
  1170. } else {
  1171. lpfc_issue_els_logo(vport, ndlp, 0);
  1172. }
  1173. /* Put ndlp in npr state set plogi timer for 1 sec */
  1174. mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
  1175. spin_lock_irq(&ndlp->lock);
  1176. ndlp->nlp_flag |= NLP_DELAY_TMO;
  1177. spin_unlock_irq(&ndlp->lock);
  1178. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  1179. ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
  1180. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1181. return ndlp->nlp_state;
  1182. }
/* PLOGI_ISSUE state: our PLOGI completed.  Validate the accept's service
 * parameters, absorb class/E_D_TOV/vendor options, handle pt2pt specifics
 * (CONFIG_LINK for SLI3, REG_VFI for SLI4), and issue REG_LOGIN to register
 * the RPI.  On success the node moves to REG_LOGIN_ISSUE; any failure path
 * drops through to "out" which parks the node in NPR and reports it freed.
 */
static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp;
	uint32_t *lp;
	uint32_t vid, flag;
	struct serv_parm *sp;
	uint32_t ed_tov;
	LPFC_MBOXQ_t *mbox;
	int rc;
	u32 ulp_status;
	u32 did;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->rsp_iocb;

	ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	/* A failed PLOGI gets no further parsing */
	if (ulp_status)
		goto out;

	pcmd = cmdiocb->cmd_dmabuf;

	/* The accept payload is the second buffer chained to the command */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;

	lp = (uint32_t *) prsp->virt;
	/* Service parameters follow the 32-bit ELS command word */
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);

	/* Record the negotiated class of service for FCP */
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	/* Record every class the remote port declared valid */
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	if ((vport->fc_flag & FC_PT2PT) &&
	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
		if (sp->cmn.edtovResolution) {
			/* E_D_TOV ticks are in nanoseconds */
			/* NOTE(review): converts phba->fc_edtov rather than
			 * the peer's just-read ed_tov here — confirm intended.
			 */
			ed_tov = (phba->fc_edtov + 999999) / 1000000;
		}

		ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
		/* Honor Emulex vendor-specific suppress-response option */
		if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
		    sp->cmn.valid_vendor_ver_level) {
			vid = be32_to_cpu(sp->un.vv.vid);
			flag = be32_to_cpu(sp->un.vv.flags);
			if ((vid == LPFC_VV_EMLX_ID) &&
			    (flag & LPFC_VV_SUPPRESS_RSP))
				ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
		}

		/*
		 * Use the larger EDTOV
		 * RATOV = 2 * EDTOV for pt-to-pt
		 */
		if (ed_tov > phba->fc_edtov)
			phba->fc_edtov = ed_tov;
		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;

		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

		/* Issue config_link / reg_vfi to account for updated TOV's */
		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_issue_reg_vfi(vport);
		} else {
			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
			if (!mbox) {
				lpfc_printf_vlog(vport, KERN_ERR,
						 LOG_TRACE_EVENT,
						 "0133 PLOGI: no memory "
						 "for config_link "
						 "Data: x%x x%x x%x x%x\n",
						 ndlp->nlp_DID, ndlp->nlp_state,
						 ndlp->nlp_flag, ndlp->nlp_rpi);
				goto out;
			}

			lpfc_config_link(phba, mbox);

			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mbox->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				goto out;
			}
		}
	}

	/* Drop any stale registration before re-registering the RPI */
	lpfc_unreg_rpi(vport, ndlp);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0018 PLOGI: no memory for reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	did = get_job_els_rsp64_did(phba, cmdiocb);

	if (lpfc_reg_rpi(phba, vport->vpi, did,
			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
		/* Pick the completion handler based on the well-known DID */
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			/* Fabric Controller Node needs these parameters. */
			memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm));
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}

		/* Node reference held for the mailbox completion */
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		if (!mbox->ctx_ndlp)
			goto out;

		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}
out:
	if (ndlp->nlp_DID == NameServer_DID) {
		/* Without the NameServer login the vport cannot discover */
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0261 Cannot Register NameServer login\n");
	}

	/*
	** In case the node reference counter does not go to zero, ensure that
	** the stale state for the node is not processed.
	*/

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	return NLP_STE_FREED_NODE;
}
  1364. static uint32_t
  1365. lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1366. void *arg, uint32_t evt)
  1367. {
  1368. return ndlp->nlp_state;
  1369. }
  1370. static uint32_t
  1371. lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
  1372. struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
  1373. {
  1374. struct lpfc_hba *phba;
  1375. LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
  1376. MAILBOX_t *mb = &pmb->u.mb;
  1377. uint16_t rpi;
  1378. phba = vport->phba;
  1379. /* Release the RPI */
  1380. if (!(phba->pport->load_flag & FC_UNLOADING) &&
  1381. !mb->mbxStatus) {
  1382. rpi = pmb->u.mb.un.varWords[0];
  1383. lpfc_release_rpi(phba, vport, ndlp, rpi);
  1384. }
  1385. return ndlp->nlp_state;
  1386. }
  1387. static uint32_t
  1388. lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1389. void *arg, uint32_t evt)
  1390. {
  1391. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1392. spin_lock_irq(&ndlp->lock);
  1393. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1394. spin_unlock_irq(&ndlp->lock);
  1395. return ndlp->nlp_state;
  1396. } else {
  1397. /* software abort outstanding PLOGI */
  1398. lpfc_els_abort(vport->phba, ndlp);
  1399. lpfc_drop_node(vport, ndlp);
  1400. return NLP_STE_FREED_NODE;
  1401. }
  1402. }
static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	/* Park the node in NPR and clear any pending removal/discovery
	 * flags so recovery restarts from a clean slate.
	 */
	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(&ndlp->lock);

	return ndlp->nlp_state;
}
  1424. static uint32_t
  1425. lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1426. void *arg, uint32_t evt)
  1427. {
  1428. struct lpfc_hba *phba = vport->phba;
  1429. struct lpfc_iocbq *cmdiocb;
  1430. /* software abort outstanding ADISC */
  1431. lpfc_els_abort(phba, ndlp);
  1432. cmdiocb = (struct lpfc_iocbq *) arg;
  1433. if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
  1434. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1435. spin_lock_irq(&ndlp->lock);
  1436. ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
  1437. spin_unlock_irq(&ndlp->lock);
  1438. if (vport->num_disc_nodes)
  1439. lpfc_more_adisc(vport);
  1440. }
  1441. return ndlp->nlp_state;
  1442. }
  1443. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  1444. lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
  1445. lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
  1446. return ndlp->nlp_state;
  1447. }
  1448. static uint32_t
  1449. lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1450. void *arg, uint32_t evt)
  1451. {
  1452. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1453. if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
  1454. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  1455. return ndlp->nlp_state;
  1456. }
  1457. static uint32_t
  1458. lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1459. void *arg, uint32_t evt)
  1460. {
  1461. struct lpfc_hba *phba = vport->phba;
  1462. struct lpfc_iocbq *cmdiocb;
  1463. cmdiocb = (struct lpfc_iocbq *) arg;
  1464. /* software abort outstanding ADISC */
  1465. lpfc_els_abort(phba, ndlp);
  1466. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1467. return ndlp->nlp_state;
  1468. }
  1469. static uint32_t
  1470. lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
  1471. struct lpfc_nodelist *ndlp,
  1472. void *arg, uint32_t evt)
  1473. {
  1474. struct lpfc_iocbq *cmdiocb;
  1475. cmdiocb = (struct lpfc_iocbq *) arg;
  1476. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  1477. return ndlp->nlp_state;
  1478. }
  1479. static uint32_t
  1480. lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1481. void *arg, uint32_t evt)
  1482. {
  1483. struct lpfc_iocbq *cmdiocb;
  1484. cmdiocb = (struct lpfc_iocbq *) arg;
  1485. /* Treat like rcv logo */
  1486. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
  1487. return ndlp->nlp_state;
  1488. }
static uint32_t
lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	ADISC *ap;
	int rc;
	u32 ulp_status;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->rsp_iocb;

	ulp_status = get_job_ulpstatus(phba, rspiocb);

	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);

	/* Failed ADISC, or the responder's WWNN/WWPN no longer match the
	 * node: fall back to a fresh PLOGI after a short delay and
	 * unregister the now-stale RPI.
	 */
	if ((ulp_status) ||
	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
		/* 1 sec timeout */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000));
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(&ndlp->lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		lpfc_unreg_rpi(vport, ndlp);
		return ndlp->nlp_state;
	}

	/* On SLI4 the paused RPI must be resumed before the node can be
	 * used again; on failure stay in ADISC_ISSUE and retry later.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
		if (rc) {
			/* Stay in state and retry. */
			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
			return ndlp->nlp_state;
		}
	}

	/* Refresh the FC4 capability bits from the node's known roles. */
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		ndlp->nlp_fc4_type |= NLP_FC4_FCP;

	if (ndlp->nlp_type & NLP_NVME_TARGET)
		ndlp->nlp_fc4_type |= NLP_FC4_NVME;

	/* Targets become MAPPED; everything else goes to UNMAPPED. */
	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	} else {
		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}

	return ndlp->nlp_state;
}
  1538. static uint32_t
  1539. lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1540. void *arg, uint32_t evt)
  1541. {
  1542. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1543. spin_lock_irq(&ndlp->lock);
  1544. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1545. spin_unlock_irq(&ndlp->lock);
  1546. return ndlp->nlp_state;
  1547. } else {
  1548. /* software abort outstanding ADISC */
  1549. lpfc_els_abort(vport->phba, ndlp);
  1550. lpfc_drop_node(vport, ndlp);
  1551. return NLP_STE_FREED_NODE;
  1552. }
  1553. }
static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	/* Park the node in NPR, clear pending removal/discovery flags,
	 * then record whether it should be re-authenticated via ADISC.
	 */
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(&ndlp->lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
  1576. static uint32_t
  1577. lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
  1578. struct lpfc_nodelist *ndlp,
  1579. void *arg,
  1580. uint32_t evt)
  1581. {
  1582. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1583. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  1584. return ndlp->nlp_state;
  1585. }
static uint32_t
lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt stat;

	/* Unsupported FC4 type: the check routine sends the response. */
	if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
		return ndlp->nlp_state;
	}
	if (vport->phba->nvmet_support) {
		/* NVME Target mode. Handle and respond to the PRLI and
		 * transition to UNMAPPED provided the RPI has completed
		 * registration.
		 */
		if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
			lpfc_rcv_prli(vport, ndlp, cmdiocb);
			lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
		} else {
			/* RPI registration has not completed. Reject the PRLI
			 * to prevent an illegal state transition when the
			 * rpi registration does complete.
			 */
			memset(&stat, 0, sizeof(struct ls_rjt));
			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
					    ndlp, NULL);
			return ndlp->nlp_state;
		}
	} else {
		/* Initiator mode. */
		lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	}
	return ndlp->nlp_state;
}
  1623. static uint32_t
  1624. lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
  1625. struct lpfc_nodelist *ndlp,
  1626. void *arg,
  1627. uint32_t evt)
  1628. {
  1629. struct lpfc_hba *phba = vport->phba;
  1630. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1631. LPFC_MBOXQ_t *mb;
  1632. LPFC_MBOXQ_t *nextmb;
  1633. struct lpfc_nodelist *ns_ndlp;
  1634. cmdiocb = (struct lpfc_iocbq *) arg;
  1635. /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
  1636. if ((mb = phba->sli.mbox_active)) {
  1637. if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
  1638. (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
  1639. ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
  1640. lpfc_nlp_put(ndlp);
  1641. mb->ctx_ndlp = NULL;
  1642. mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  1643. }
  1644. }
  1645. spin_lock_irq(&phba->hbalock);
  1646. list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
  1647. if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
  1648. (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
  1649. ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
  1650. lpfc_nlp_put(ndlp);
  1651. list_del(&mb->list);
  1652. phba->sli.mboxq_cnt--;
  1653. lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
  1654. }
  1655. }
  1656. spin_unlock_irq(&phba->hbalock);
  1657. /* software abort if any GID_FT is outstanding */
  1658. if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
  1659. ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
  1660. if (ns_ndlp)
  1661. lpfc_els_abort(phba, ns_ndlp);
  1662. }
  1663. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1664. return ndlp->nlp_state;
  1665. }
  1666. static uint32_t
  1667. lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
  1668. struct lpfc_nodelist *ndlp,
  1669. void *arg,
  1670. uint32_t evt)
  1671. {
  1672. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1673. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  1674. return ndlp->nlp_state;
  1675. }
  1676. static uint32_t
  1677. lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
  1678. struct lpfc_nodelist *ndlp,
  1679. void *arg,
  1680. uint32_t evt)
  1681. {
  1682. struct lpfc_iocbq *cmdiocb;
  1683. cmdiocb = (struct lpfc_iocbq *) arg;
  1684. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
  1685. return ndlp->nlp_state;
  1686. }
static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp,
				  void *arg,
				  uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t did = mb->un.varWords[1];

	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0246 RegLogin failed Data: x%x x%x x%x x%x "
				 "x%x\n",
				 did, mb->mbxStatus, vport->port_state,
				 mb->un.varRegLogin.vpi,
				 mb->un.varRegLogin.rpi);
		/*
		 * If RegLogin failed due to lack of HBA resources do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
			return ndlp->nlp_state;
		}

		/* Put ndlp in npr state set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(&ndlp->lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		/* LOGO first; the delayed PLOGI retries the login. */
		lpfc_issue_els_logo(vport, ndlp, 0);
		return ndlp->nlp_state;
	}

	/* SLI4 ports have preallocated logical rpis. */
	if (phba->sli_rev < LPFC_SLI_REV4)
		ndlp->nlp_rpi = mb->un.varWords[0];

	ndlp->nlp_flag |= NLP_RPI_REGISTERED;

	/* Only if we are not a fabric nport do we issue PRLI */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "3066 RegLogin Complete on x%x x%x x%x\n",
			 did, ndlp->nlp_type, ndlp->nlp_fc4_type);
	if (!(ndlp->nlp_type & NLP_FABRIC) &&
	    (phba->nvmet_support == 0)) {
		/* The driver supports FCP and NVME concurrently. If the
		 * ndlp's nlp_fc4_type is still zero, the driver doesn't
		 * know what PRLI to send yet. Figure that out now and
		 * call PRLI depending on the outcome.
		 */
		if (vport->fc_flag & FC_PT2PT) {
			/* If we are pt2pt, there is no Fabric to determine
			 * the FC4 type of the remote nport. So if NVME
			 * is configured try it.
			 */
			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
			if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
			     vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
				/* We need to update the localport also */
				lpfc_nvme_update_localport(vport);
			}
		} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* Private loop: FCP only. */
			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
		} else if (ndlp->nlp_fc4_type == 0) {
			/* If we are only configured for FCP, the driver
			 * should just issue PRLI for FCP. Otherwise issue
			 * GFT_ID to determine if remote port supports NVME.
			 */
			if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
				lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
					    ndlp->nlp_DID);
				return ndlp->nlp_state;
			}
			ndlp->nlp_fc4_type = NLP_FC4_FCP;
		}

		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		/* If the PRLI cannot be sent, log out and fall back to NPR. */
		if (lpfc_issue_els_prli(vport, ndlp, 0)) {
			lpfc_issue_els_logo(vport, ndlp, 0);
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		}
	} else {
		if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
			phba->targetport->port_id = vport->fc_myDID;

		/* Only Fabric ports should transition. NVME target
		 * must complete PRLI.
		 */
		if (ndlp->nlp_type & NLP_FABRIC) {
			ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		}
	}
	return ndlp->nlp_state;
}
  1787. static uint32_t
  1788. lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
  1789. struct lpfc_nodelist *ndlp,
  1790. void *arg,
  1791. uint32_t evt)
  1792. {
  1793. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1794. spin_lock_irq(&ndlp->lock);
  1795. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1796. spin_unlock_irq(&ndlp->lock);
  1797. return ndlp->nlp_state;
  1798. } else {
  1799. lpfc_drop_node(vport, ndlp);
  1800. return NLP_STE_FREED_NODE;
  1801. }
  1802. }
static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp,
				 void *arg,
				 uint32_t evt)
{
	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* Park the node in NPR while recovery proceeds. */
	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);

	/* If we are a target we won't immediately transition into PRLI,
	 * so if REG_LOGIN already completed we don't need to ignore it.
	 */
	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
	    !vport->phba->nvmet_support)
		ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;

	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(&ndlp->lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
  1828. static uint32_t
  1829. lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1830. void *arg, uint32_t evt)
  1831. {
  1832. struct lpfc_iocbq *cmdiocb;
  1833. cmdiocb = (struct lpfc_iocbq *) arg;
  1834. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  1835. return ndlp->nlp_state;
  1836. }
  1837. static uint32_t
  1838. lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1839. void *arg, uint32_t evt)
  1840. {
  1841. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1842. if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
  1843. return ndlp->nlp_state;
  1844. lpfc_rcv_prli(vport, ndlp, cmdiocb);
  1845. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  1846. return ndlp->nlp_state;
  1847. }
  1848. static uint32_t
  1849. lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1850. void *arg, uint32_t evt)
  1851. {
  1852. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1853. /* Software abort outstanding PRLI before sending acc */
  1854. lpfc_els_abort(vport->phba, ndlp);
  1855. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1856. return ndlp->nlp_state;
  1857. }
  1858. static uint32_t
  1859. lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1860. void *arg, uint32_t evt)
  1861. {
  1862. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1863. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  1864. return ndlp->nlp_state;
  1865. }
  1866. /* This routine is envoked when we rcv a PRLO request from a nport
  1867. * we are logged into. We should send back a PRLO rsp setting the
  1868. * appropriate bits.
  1869. * NEXT STATE = PRLI_ISSUE
  1870. */
  1871. static uint32_t
  1872. lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1873. void *arg, uint32_t evt)
  1874. {
  1875. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1876. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
  1877. return ndlp->nlp_state;
  1878. }
static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_hba *phba = vport->phba;
	PRLI *npr;
	struct lpfc_nvme_prli *nvpr;
	void *temp_ptr;
	u32 ulp_status;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->rsp_iocb;

	ulp_status = get_job_ulpstatus(phba, rspiocb);

	/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
	 * format is different so NULL the two PRLI types so that the
	 * driver correctly gets the correct context.
	 */
	npr = NULL;
	nvpr = NULL;
	temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
	if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
		npr = (PRLI *) temp_ptr;
	else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
		nvpr = (struct lpfc_nvme_prli *) temp_ptr;

	if (ulp_status) {
		/* Restricted NPIV login: force removal via the out path. */
		if ((vport->port_type == LPFC_NPIV_PORT) &&
		    vport->cfg_restrict_login) {
			goto out;
		}

		/* Adjust the nlp_type accordingly if the PRLI failed */
		if (npr)
			ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
		if (nvpr)
			ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;

		/* We can't set the DSM state till BOTH PRLIs complete */
		goto out_err;
	}

	/* Successful FCP PRLI: record initiator/target roles and
	 * first-burst / FCP-2 capabilities from the accept payload.
	 */
	if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
				 npr->initiatorFunc,
				 npr->targetFunc);
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc) {
			ndlp->nlp_type |= NLP_FCP_TARGET;
			if (npr->writeXferRdyDis)
				ndlp->nlp_flag |= NLP_FIRSTBURST;
		}
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

	} else if (nvpr &&
		   (bf_get_be32(prli_acc_rsp_code, nvpr) ==
		    PRLI_REQ_EXECUTED) &&
		   (bf_get_be32(prli_type_code, nvpr) ==
		    PRLI_NVME_TYPE)) {

		/* Complete setting up the remote ndlp personality. */
		if (bf_get_be32(prli_init, nvpr))
			ndlp->nlp_type |= NLP_NVME_INITIATOR;

		/* NVMe SLER (retry) needs both local support and the
		 * remote's nsler + conf bits.
		 */
		if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
		    bf_get_be32(prli_conf, nvpr))
			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
		else
			ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;

		/* Target driver cannot solicit NVME FB. */
		if (bf_get_be32(prli_tgt, nvpr)) {
			/* Complete the nvme target roles. The transport
			 * needs to know if the rport is capable of
			 * discovery in addition to its role.
			 */
			ndlp->nlp_type |= NLP_NVME_TARGET;
			if (bf_get_be32(prli_disc, nvpr))
				ndlp->nlp_type |= NLP_NVME_DISCOVERY;

			/*
			 * If prli_fba is set, the Target supports FirstBurst.
			 * If prli_fb_sz is 0, the FirstBurst size is unlimited,
			 * otherwise it defines the actual size supported by
			 * the NVME Target.
			 */
			if ((bf_get_be32(prli_fba, nvpr) == 1) &&
			    (phba->cfg_nvme_enable_fb) &&
			    (!phba->nvmet_support)) {
				/* Both sides support FB. The target's first
				 * burst size is a 512 byte encoded value.
				 */
				ndlp->nlp_flag |= NLP_FIRSTBURST;
				ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
								 nvpr);

				/* Expressed in units of 512 bytes */
				if (ndlp->nvme_fb_size)
					ndlp->nvme_fb_size <<=
						LPFC_NVME_FB_SHIFT;
				else
					ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
			}
		}

		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6029 NVME PRLI Cmpl w1 x%08x "
				 "w4 x%08x w5 x%08x flag x%x, "
				 "fcp_info x%x nlp_type x%x\n",
				 be32_to_cpu(nvpr->word1),
				 be32_to_cpu(nvpr->word4),
				 be32_to_cpu(nvpr->word5),
				 ndlp->nlp_flag, ndlp->nlp_fcp_info,
				 ndlp->nlp_type);
	}

	/* Restricted NPIV login and the remote is not an FCP target:
	 * log it out and park in NPR (also the error-path target of
	 * the "out" label above).
	 */
	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (vport->port_type == LPFC_NPIV_PORT) &&
	    vport->cfg_restrict_login) {
out:
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
		spin_unlock_irq(&ndlp->lock);
		lpfc_issue_els_logo(vport, ndlp, 0);

		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

out_err:
	/* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
	 * are complete.
	 */
	if (ndlp->fc4_prli_sent == 0) {
		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
		else if (ndlp->nlp_type &
			 (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	} else
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_ELS,
				 "3067 PRLI's still outstanding "
				 "on x%06x - count %d, Pend Node Mode "
				 "transition...\n",
				 ndlp->nlp_DID, ndlp->fc4_prli_sent);

	return ndlp->nlp_state;
}
  2018. /*! lpfc_device_rm_prli_issue
  2019. *
  2020. * \pre
  2021. * \post
  2022. * \param phba
  2023. * \param ndlp
  2024. * \param arg
  2025. * \param evt
  2026. * \return uint32_t
  2027. *
  2028. * \b Description:
  2029. * This routine is envoked when we a request to remove a nport we are in the
  2030. * process of PRLIing. We should software abort outstanding prli, unreg
  2031. * login, send a logout. We will change node state to UNUSED_NODE, put it
  2032. * on plogi list so it can be freed when LOGO completes.
  2033. *
  2034. */
  2035. static uint32_t
  2036. lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2037. void *arg, uint32_t evt)
  2038. {
  2039. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  2040. spin_lock_irq(&ndlp->lock);
  2041. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  2042. spin_unlock_irq(&ndlp->lock);
  2043. return ndlp->nlp_state;
  2044. } else {
  2045. /* software abort outstanding PLOGI */
  2046. lpfc_els_abort(vport->phba, ndlp);
  2047. lpfc_drop_node(vport, ndlp);
  2048. return NLP_STE_FREED_NODE;
  2049. }
  2050. }
/* lpfc_device_recov_prli_issue - handle a device-recovery event in PRLI_ISSUE
 *
 * Invoked when the state of the device becomes unknown (e.g. link down)
 * while a PRLI is outstanding. Unless an RSCN is being deferred, software-
 * abort the outstanding PRLI, park the node in NPR, clear pending removal/
 * discovery flags, and record whether ADISC should be used on recovery.
 */
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct lpfc_hba *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PRLI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(&ndlp->lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
  2089. static uint32_t
  2090. lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2091. void *arg, uint32_t evt)
  2092. {
  2093. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2094. struct ls_rjt stat;
  2095. memset(&stat, 0, sizeof(struct ls_rjt));
  2096. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2097. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2098. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2099. return ndlp->nlp_state;
  2100. }
  2101. static uint32_t
  2102. lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2103. void *arg, uint32_t evt)
  2104. {
  2105. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2106. struct ls_rjt stat;
  2107. memset(&stat, 0, sizeof(struct ls_rjt));
  2108. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2109. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2110. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2111. return ndlp->nlp_state;
  2112. }
static uint32_t
lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;

	/* The remote port is also logging out while our LOGO is
	 * outstanding. Mark NLP_LOGO_ACC (under the node lock) before
	 * sending the ACC so the completion path sees the flag.
	 */
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(&ndlp->lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}
  2124. static uint32_t
  2125. lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2126. void *arg, uint32_t evt)
  2127. {
  2128. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2129. struct ls_rjt stat;
  2130. memset(&stat, 0, sizeof(struct ls_rjt));
  2131. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2132. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2133. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2134. return ndlp->nlp_state;
  2135. }
  2136. static uint32_t
  2137. lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2138. void *arg, uint32_t evt)
  2139. {
  2140. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
  2141. struct ls_rjt stat;
  2142. memset(&stat, 0, sizeof(struct ls_rjt));
  2143. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  2144. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  2145. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
  2146. return ndlp->nlp_state;
  2147. }
static uint32_t
lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	/* Our LOGO completed: park the node in NPR, clear pending
	 * removal/discovery flags, and record whether ADISC should be
	 * used if the node is rediscovered.
	 */
	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(&ndlp->lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
static uint32_t
lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	/*
	 * DevLoss has timed out and is calling for Device Remove.
	 * In this case, abort the LOGO and cleanup the ndlp
	 */

	lpfc_unreg_rpi(vport, ndlp);
	/* software abort outstanding PLOGI */
	lpfc_els_abort(vport->phba, ndlp);
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}
static uint32_t
lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg, uint32_t evt)
{
	/*
	 * Device Recovery events have no meaning for a node with a LOGO
	 * outstanding. The LOGO has to complete first and handle the
	 * node from that point.
	 */
	return ndlp->nlp_state;
}
  2186. static uint32_t
  2187. lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2188. void *arg, uint32_t evt)
  2189. {
  2190. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2191. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  2192. return ndlp->nlp_state;
  2193. }
  2194. static uint32_t
  2195. lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2196. void *arg, uint32_t evt)
  2197. {
  2198. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2199. if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
  2200. return ndlp->nlp_state;
  2201. lpfc_rcv_prli(vport, ndlp, cmdiocb);
  2202. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  2203. return ndlp->nlp_state;
  2204. }
  2205. static uint32_t
  2206. lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2207. void *arg, uint32_t evt)
  2208. {
  2209. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  2210. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  2211. return ndlp->nlp_state;
  2212. }
/* ADISC/PDISC received while in UNMAPPED state: delegate to the common
 * PADISC receive handler; state is unchanged here.
 */
static uint32_t
lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}
/* PRLO received while in UNMAPPED state: simply ACC it (there is no
 * mapped session to tear down at this point); state is unchanged.
 */
static uint32_t
lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	return ndlp->nlp_state;
}
/* DEVICE_RM while in UNMAPPED state: release the node and report it
 * freed so the caller does not touch it again.
 */
static uint32_t
lpfc_device_rm_unmap_node(struct lpfc_vport *vport,
			  struct lpfc_nodelist *ndlp,
			  void *arg,
			  uint32_t evt)
{
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}
/* DEVICE_RECOVERY while in UNMAPPED state: park the node in NPR until
 * it is rediscovered, clearing per-session discovery/removal flags and
 * the negotiated FC4 types.
 */
static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	/* Remember where we came from for the eventual re-login path. */
	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
	spin_unlock_irq(&ndlp->lock);
	lpfc_disc_set_adisc(vport, ndlp);

	return ndlp->nlp_state;
}
/* PLOGI received while in MAPPED state: hand off to the common
 * receive-PLOGI handler; this routine itself leaves the state unchanged.
 */
static uint32_t
lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}
/* PRLI received while in MAPPED state: the login is already complete,
 * so (after the support check) just ACC the PRLI; no state change and
 * no re-processing of PRLI parameters.
 */
static uint32_t
lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
		return ndlp->nlp_state;
	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}
/* LOGO received while in MAPPED state: delegate to the common LOGO
 * receive handler, identifying the trigger as an ELS LOGO.
 */
static uint32_t
lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}
/* ADISC/PDISC received while in MAPPED state: delegate to the common
 * PADISC receive handler; state is unchanged here.
 */
static uint32_t
lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}
/* PRLO received while in MAPPED state: abort outstanding I/O to the
 * target first, then process the PRLO through the common LOGO path.
 */
static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target */
	lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}
/* DEVICE_RECOVERY while in MAPPED state: mark the node for possible
 * ADISC-based revalidation, move it to NPR, and clear per-session
 * discovery/removal flags plus the negotiated FC4 types.
 */
static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	lpfc_disc_set_adisc(vport, ndlp);

	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
	spin_unlock_irq(&ndlp->lock);
	return ndlp->nlp_state;
}
/* PLOGI received while in NPR state.  If the common handler accepts the
 * PLOGI, cancel any pending retry timer and clear discovery flags;
 * otherwise, when the node is not already queued for discovery or
 * delayed retry, transition to PLOGI_ISSUE and send our own PLOGI.
 */
static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
		return ndlp->nlp_state;
	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
		spin_unlock_irq(&ndlp->lock);
	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		/* send PLOGI immediately, move to PLOGI issue state */
		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
/* PRLI received while in NPR state: we have no login with the node, so
 * reject with LS_RJT (unable to perform) and, unless a delayed retry is
 * already pending, kick off a fresh PLOGI for non-ADISC nodes.
 */
static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		/*
		 * ADISC nodes will be handled in regular discovery path after
		 * receiving response from NS.
		 *
		 * For other nodes, Send PLOGI to trigger an implicit LOGO.
		 */
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
/* LOGO received while in NPR state: delegate to the common LOGO receive
 * handler, identifying the trigger as an ELS LOGO.
 */
static uint32_t
lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}
/* ADISC/PDISC received while in NPR state: process it through the common
 * PADISC handler, then conditionally restart login with a PLOGI.
 */
static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery in progress for this node. Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		/*
		 * ADISC nodes will be handled in regular discovery path after
		 * receiving response from NS.
		 *
		 * For other nodes, Send PLOGI to trigger an implicit LOGO.
		 */
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
/* PRLO received while in NPR state: ACC it, and if no delayed-retry
 * timer is already running, arm a 1 second timer that will re-issue
 * PLOGI.  In both paths the node's ADISC eligibility is cleared.
 */
static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Mark the ACC as pending before sending it. */
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(&ndlp->lock);
	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		/* Schedule a PLOGI retry one second from now. */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(&ndlp->lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(&ndlp->lock);
	}
	return ndlp->nlp_state;
}
  2421. static uint32_t
  2422. lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2423. void *arg, uint32_t evt)
  2424. {
  2425. struct lpfc_hba *phba = vport->phba;
  2426. struct lpfc_iocbq *cmdiocb, *rspiocb;
  2427. u32 ulp_status;
  2428. cmdiocb = (struct lpfc_iocbq *) arg;
  2429. rspiocb = cmdiocb->rsp_iocb;
  2430. ulp_status = get_job_ulpstatus(phba, rspiocb);
  2431. if (ulp_status)
  2432. return NLP_STE_FREED_NODE;
  2433. return ndlp->nlp_state;
  2434. }
  2435. static uint32_t
  2436. lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2437. void *arg, uint32_t evt)
  2438. {
  2439. struct lpfc_hba *phba = vport->phba;
  2440. struct lpfc_iocbq *cmdiocb, *rspiocb;
  2441. u32 ulp_status;
  2442. cmdiocb = (struct lpfc_iocbq *) arg;
  2443. rspiocb = cmdiocb->rsp_iocb;
  2444. ulp_status = get_job_ulpstatus(phba, rspiocb);
  2445. if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
  2446. lpfc_drop_node(vport, ndlp);
  2447. return NLP_STE_FREED_NODE;
  2448. }
  2449. return ndlp->nlp_state;
  2450. }
/* LOGO completion delivered while the node is in NPR state: drop the
 * RPI registration; for the fabric controller additionally clear the
 * vport's fabric-related fc_flag bits.
 */
static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* For the fabric port just clear the fc flags. */
	if (ndlp->nlp_DID == Fabric_DID) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_unreg_rpi(vport, ndlp);
	return ndlp->nlp_state;
}
  2465. static uint32_t
  2466. lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  2467. void *arg, uint32_t evt)
  2468. {
  2469. struct lpfc_hba *phba = vport->phba;
  2470. struct lpfc_iocbq *cmdiocb, *rspiocb;
  2471. u32 ulp_status;
  2472. cmdiocb = (struct lpfc_iocbq *) arg;
  2473. rspiocb = cmdiocb->rsp_iocb;
  2474. ulp_status = get_job_ulpstatus(phba, rspiocb);
  2475. if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
  2476. lpfc_drop_node(vport, ndlp);
  2477. return NLP_STE_FREED_NODE;
  2478. }
  2479. return ndlp->nlp_state;
  2480. }
/* REG_LOGIN mailbox completion delivered while the node is in NPR state.
 * On success, record the RPI (SLI3 only; SLI4 RPIs are preallocated) and
 * mark it registered — but immediately unregister again if a LOGO ACC is
 * pending.  On mailbox failure, drop the node if it was already flagged
 * for removal.
 */
static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->u.mb;

	if (!mb->mbxStatus) {
		/* SLI4 ports have preallocated logical rpis. */
		if (vport->phba->sli_rev < LPFC_SLI_REV4)
			ndlp->nlp_rpi = mb->un.varWords[0];
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		if (ndlp->nlp_flag & NLP_LOGO_ACC) {
			lpfc_unreg_rpi(vport, ndlp);
		}
	} else {
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_drop_node(vport, ndlp);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}
/* DEVICE_RM while in NPR state.  If the node is still queued for
 * discovery (NLP_NPR_2B_DISC), defer the removal by flagging
 * NLP_NODEV_REMOVE — the completion handlers drop the node when they
 * see that flag.  Otherwise drop it right away.
 */
static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(&ndlp->lock);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}
/* DEVICE_RECOVERY while already in NPR state: cancel any delayed-retry
 * timer and clear removal/discovery flags plus the negotiated FC4
 * types.  Skipped entirely while an RSCN is deferred.
 */
static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
	spin_unlock_irq(&ndlp->lock);
	return ndlp->nlp_state;
}
  2533. /* This next section defines the NPort Discovery State Machine */
  2534. /* There are 4 different double linked lists nodelist entries can reside on.
  2535. * The plogi list and adisc list are used when Link Up discovery or RSCN
  2536. * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists will keep track of what nodes will be affected
 * by an RSCN, or a Link Up (typically, all nodes are affected on Link Up).
  2539. * The unmapped_list will contain all nodes that we have successfully logged
  2540. * into at the Fibre Channel level. The mapped_list will contain all nodes
  2541. * that are mapped FCP targets.
  2542. */
  2543. /*
  2544. * The bind list is a list of undiscovered (potentially non-existent) nodes
  2545. * that we have saved binding information on. This information is used when
  2546. * nodes transition from the unmapped to the mapped list.
  2547. */
/* For UNUSED_NODE state, the node has just been allocated.
  2549. * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
  2550. * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
  2551. * and put on the unmapped list. For ADISC processing, the node is taken off
  2552. * the ADISC list and placed on either the mapped or unmapped list (depending
  2553. * on its previous state). Once on the unmapped list, a PRLI is issued and the
  2554. * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
  2555. * changed to UNMAPPED_NODE. If the completion indicates a mapped
  2556. * node, the node is taken off the unmapped list. The binding list is checked
  2557. * for a valid binding, or a binding is automatically assigned. If binding
  2558. * assignment is unsuccessful, the node is left on the unmapped list. If
  2559. * binding assignment is successful, the associated binding list entry (if
  2560. * any) is removed, and the node is placed on the mapped list.
  2561. */
  2562. /*
  2563. * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
  2564. * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
 * expire, all affected nodes will receive a DEVICE_RM event.
  2566. */
  2567. /*
  2568. * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
  2569. * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
  2570. * check, additional nodes may be added or removed (via DEVICE_RM) to / from
  2571. * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
  2572. * we will first process the ADISC list. 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / Events for each node are
  2574. * funnelled thru the state machine. As each node finishes ADISC processing, it
  2575. * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
  2576. * waiting, and the ADISC list count is identically 0, then we are done. For
  2577. * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
  2578. * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
 * list. 32 entries are processed initially and PLOGI is initiated for each one.
  2580. * Completions / Events for each node are funnelled thru the state machine. As
  2581. * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
  2582. * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
 * identically 0, then we are done. We have now completed discovery / RSCN
  2584. * handling. Upon completion, ALL nodes should be on either the mapped or
  2585. * unmapped lists.
  2586. */
/* Dispatch table for the discovery state machine: one action routine per
 * (state, event) pair, indexed as (state * NLP_EVT_MAX_EVENT) + event by
 * lpfc_disc_state_machine().  Rows are states, entries within a row are
 * the NLP_EVT_* events in declaration order.
 */
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine                  Event       Current State  */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unmap_node,	/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,	/* RCV_PLOGI   NPR_NODE       */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,	/* DEVICE_RM       */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};
/*
 * lpfc_disc_state_machine - deliver one event to a node's state machine
 * @vport: vport the node belongs to
 * @ndlp:  node the event applies to
 * @arg:   event-specific argument (iocb or mailbox, depending on @evt)
 * @evt:   NLP_EVT_* event code
 *
 * Looks up the action routine for (current state, event) in
 * lpfc_disc_action[] and invokes it.  A node reference is taken around
 * the call when available (lpfc_nlp_get); if the reference could not be
 * obtained, the node may have been freed by the action routine, so it
 * is not dereferenced afterwards and only the return code is logged.
 *
 * Return: the state code returned by the action routine (may be
 * NLP_STE_FREED_NODE, in which case @ndlp must not be used).
 */
int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
			 uint32_t);
	uint32_t got_ndlp = 0;
	uint32_t data1;

	if (lpfc_nlp_get(ndlp))
		got_ndlp = 1;

	cur_state = ndlp->nlp_state;

	data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
		((uint32_t)ndlp->nlp_type));

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0211 DSM in event x%x on NPort x%x in "
			 "state %d rpi x%x Data: x%x x%x\n",
			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
			 ndlp->nlp_flag, data1);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
		 "DSM in:   evt:%d ste:%d did:x%x",
		evt, cur_state, ndlp->nlp_DID);

	/* Index: one row of NLP_EVT_MAX_EVENT entries per state. */
	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (vport, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	if (got_ndlp) {
		data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
			((uint32_t)ndlp->nlp_type));
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0212 DSM out state %d on NPort x%x "
			 "rpi x%x Data: x%x x%x\n",
			 rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
			 data1);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
			"DSM out:  ste:%d did:x%x flg:x%x",
			rc, ndlp->nlp_DID, ndlp->nlp_flag);
		/* Decrement the ndlp reference count held for this function */
		lpfc_nlp_put(ndlp);
	} else {
		/* No reference was held: ndlp may be gone, log rc only. */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			"0213 DSM out state %d on NPort free\n", rc);

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
			"DSM out:  ste:%d did:x%x flg:x%x",
			rc, 0, 0);
	}

	return rc;
}