lpfc_nvme.c

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;
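
/*
 * Note: lpfc_nvme_template (above) supplies the LS and FCP entry points
 * handed to the nvme-fc transport when the localport is registered; its
 * fields are filled in further down in this file.
 */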

/**
 * lpfc_nvme_create_queue - Create and bind a driver qhandle for an IO queue
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	if (!vport || vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;
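
	/* Record the creating CPU and the transport's queue index in the
	 * driver's qhandle.
	 */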
	qhandle->cpu_id = raw_smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			lpfc_nvme_template.max_hw_queues);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
			 "hdw_queue %d qhandle x%px\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}

/**
 * lpfc_nvme_delete_queue - Free the driver qhandle for an IO queue
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free any internal data structures
 * that bind the @qidx to its internal IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
			 lport, qidx, handle);
	kfree(handle);
}
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport x%px delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	if (lport->vport->localport)
		complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	u32 fc4_xpt_flags;

	ndlp = rport->ndlp;
	if (!ndlp) {
		pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
		       __func__, rport, remoteport);
		goto rport_err;
	}

	vport = ndlp->vport;
	if (!vport) {
		pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
		       __func__, ndlp, ndlp->nlp_state, rport);
		goto rport_err;
	}

	fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete of remoteport x%px, ndlp x%px "
			 "DID x%x xflags x%x\n",
			 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
	spin_lock_irq(&ndlp->lock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
		ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

	/* On a devloss timeout event, one more put is executed provided the
	 * NVME and SCSI rport unregister requests are complete.  If the vport
	 * is unloading, this extra put is executed by lpfc_drop_node.
	 */
	if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
	return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns 1 if LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_vport *vport;
	struct lpfc_nvme_rport *lpfc_rport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nvme_lport *lport;
	uint32_t *payload = axchg->payload;
	int rc;

	vport = axchg->ndlp->vport;
	lpfc_rport = axchg->ndlp->nrport;
	if (!lpfc_rport)
		return -EINVAL;

	remoteport = lpfc_rport->remoteport;
	if (!vport->localport ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -EINVAL;

	lport = vport->localport->private;
	if (!lport)
		return -EINVAL;
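
	/* Hand the LS off to the nvme-fc transport; on success the
	 * transport owns the exchange and responds via
	 * lpfc_nvme_xmt_ls_rsp().
	 */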
	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
				axchg->size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n",
			axchg->size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (!rc)
		return 0;
#endif
	return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME LS request
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		       struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	uint32_t status;

	pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
	ndlp = cmdwqe->ndlp;
	buf_ptr = cmdwqe->bpl_dmabuf;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
			 "status %x reason x%x cmd:x%px lsreq:x%px bmp:x%px "
			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
			 ndlp);

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (buf_ptr) {
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->bpl_dmabuf = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6046 NVMEx cmpl without done call back? "
				 "Data x%px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->ndlp = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		     struct lpfc_iocbq *rspwqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->bpl_dmabuf = bmp;
	genwqe->cmd_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->ndlp = lpfc_nlp_get(ndlp);
	if (!genwqe->ndlp) {
		dev_warn(&phba->pcidev->dev,
			 "Warning: Failed node ref, not sending LS_REQ\n");
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	genwqe->context_un.nvme_lsreq = pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->num_bdes = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	genwqe->cmd_cmpl = cmpl;
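	/* Pad the driver timeout past the WQE timeout so the adapter's
	 * timer fires first.
	 */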
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x  rc x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state, rc);
		lpfc_nlp_put(ndlp);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	return 0;
}

/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   struct nvmefc_ls_req *pnvme_lsreq,
		   void (*gen_req_cmp)(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdwqe,
				       struct lpfc_iocbq *rspwqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}
	if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * There are two DMA buffers in the request; actually there is one,
	 * and the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct.  When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them.  And we do not have to look at the response data, we only
	 * care that we got a response.  All of the caring is going to happen
	 * in the nvme-fc layer.
	 */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);
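
	/* Build the two-entry BPL: BDE 0 maps the LS request payload and
	 * BDE 1 (a 64-bit input BDE) maps the buffer that receives the
	 * LS response.
	 */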
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			 "rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				pnvme_lsreq->timeout, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;
	if (vport->load_flag & FC_UNLOADING ||
	    vport->phba->hba_flag & HBA_IOQ_FLUSH)
		return -ENODEV;

	atomic_inc(&lport->fc4NvmeLsRequests);

	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
				 lpfc_nvme_ls_req_cmp);
	if (ret)
		atomic_inc(&lport->xmt_ls_err);

	return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 *   0 : if LS found and aborted
 *   non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;
	bool foundit = false;

	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
				 "x%06x, Failing LS Req\n",
				 ndlp, ndlp ? ndlp->nlp_DID : 0);
		return -EINVAL;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
			 "x%px rqstlen:%d rsplen:%d %pad %pad\n",
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and look for the wqe that matches
	 * this ELS.  If found, issue an abort on the wqe.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
			wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
			foundit = true;
			break;
		}
	}
	spin_unlock(&pring->ring_lock);

	if (foundit)
		lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
	spin_unlock_irq(&phba->hbalock);

	if (foundit)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
			 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
			 pnvme_lsreq);
	return -EINVAL;
}
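
/* lpfc_nvme_xmt_ls_rsp
 *
 * Transport downcall used to send the response for an unsolicited LS
 * that was previously delivered via lpfc_nvme_handle_lsreq().
 */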
static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
		     struct nvme_fc_remote_port *remoteport,
		     struct nvmefc_ls_rsp *ls_rsp)
{
	struct lpfc_async_xchg_ctx *axchg =
		container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
	struct lpfc_nvme_lport *lport;
	int rc;

	if (axchg->phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lport = (struct lpfc_nvme_lport *)localport->private;

	rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

	if (rc) {
		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */
		if (rc != -EALREADY)
			atomic_inc(&lport->xmt_ls_abort);
		return rc;
	}

	return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort a NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int ret;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

	ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
	if (!ret)
		atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes. Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->dma_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];			/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;	/* payload ptr */
		dptr++;			/* Skip Word 0 in payload */

		*wptr++ = *dptr++;	/* Word 1 */
		*wptr++ = *dptr++;	/* Word 2 */
		*wptr++ = *dptr++;	/* Word 3 */
		*wptr++ = *dptr++;	/* Word 4 */
		dptr++;			/* Skip Word 5 in payload */
		*wptr++ = *dptr++;	/* Word 6 */
		*wptr++ = *dptr++;	/* Word 7 */
		dptr += 8;		/* Skip Words 8-15 in payload */

		*wptr++ = *dptr++;	/* Word 16 */
		*wptr++ = *dptr++;	/* Word 17 */
		*wptr++ = *dptr++;	/* Word 18 */
		*wptr++ = *dptr++;	/* Word 19 */
		*wptr++ = *dptr++;	/* Word 20 */
		*wptr++ = *dptr++;	/* Word 21 */
		*wptr++ = *dptr++;	/* Word 22 */
		*wptr   = *dptr;	/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}

/*
 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
 *
 * Driver registers this routine as the WQE completion handler for an
 * NVME FCP command.  The routine translates the WCQE completion status
 * into the transport's nvmefc_fcp_req, updates the IO statistics, and
 * calls the transport done() routine once the exchange is no longer busy.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      struct lpfc_iocbq *pwqeOut)
{
	struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;
	uint32_t lat;
	bool call_done = false;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif
	int offline = 0;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6071 Null lpfc_ncmd pointer. No "
				 "release, skip completion\n");
		return;
	}

	/* Guard against abort handler being called at same time */
	spin_lock(&lpfc_ncmd->buf_lock);

	if (!lpfc_ncmd->nvmeCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
				 "nvmeCmd x%px\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

	if (unlikely(status && vport->localport)) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&lport->cmpl_fcp_xb);
			atomic_inc(&lport->cmpl_fcp_err);
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6062 Ignoring NVME cmpl.  No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now let's build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;

			/* Get the NVME cmd details for this unique error. */
			cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
			ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

			/* Check if this is really an ERSP */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				lpfc_ncmd->result = 0;

				lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
					 "6084 NVME FCP_ERR ERSP: "
					 "xri %x placed x%x opcode x%x cmd_id "
					 "x%x cqe_status x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 wcqe->total_data_placed,
					 cp->sqe.common.opcode,
					 cp->sqe.common.command_id,
					 ep->cqe.status);
				break;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x opcode x%x cmd_id x%x, "
					 "cqe_status x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed,
					 cp->sqe.common.opcode,
					 cp->sqe.common.command_id,
					 ep->cqe.status);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NVME_IOERR,
					 "6032 Delay Aborted cmd x%px "
					 "nvme cmd x%px, xri x%x, "
					 "xb %d\n",
					 lpfc_ncmd, nCmd,
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 bf_get(lpfc_wcqe_c_xb, wcqe));
			fallthrough;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x [x%x] "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->parameter,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
			offline = pci_channel_offline(vport->phba->pcidev);
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_ncmd);
	}
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
		if (lpfc_ncmd->cpu != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 cpu, lpfc_ncmd->cpu);
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */
	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		call_done = true;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	/* Check if IO qualified for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    nCmd->io_dir == NVMEFC_FCP_READ &&
	    nCmd->payload_length) {
		/* Used when calculating average latency */
		lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
		lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
	}

	if (call_done)
		nCmd->done(nCmd);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc nvme command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes the FCP command WQE from the template that
 * matches the IO direction and fills in the request-specific fields
 * from the @lpfc_ncmd nvme request before the WQE is issued.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct nvme_common_command *sqe;
	struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			cstat->output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			/* For a CMF Managed port, iod must be zero'ed */
			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
			cstat->input_requests++;
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		cstat->control_requests++;
	}
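
	/* If the node negotiated NVMe sequence-level error recovery
	 * (NSLER), flag the WQE for ERP; Async Event commands also get
	 * the FFRQ bit set.
	 */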
	if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
		sqe = &((struct nvme_fc_cmd_iu *)
			nCmd->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_async_event)
			bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 10 */
	bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

	/* Words 13 14 15 are for PBDE support */

	/* add the VMID tags as per switch response */
	if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
		if (phba->pport->vmid_priority_tagging) {
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
			       lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
		} else {
			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
		}
	}

	pwqeq->vport = vport;
	return 0;
}

/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc nvme command
 *
 * This routine walks the scatter-gather list supplied by the transport
 * and builds the SLI-4 SGEs (and the PBDE, when enabled) for the
 * command WQE.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr = 0;
	uint32_t dma_len = 0;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	bool lsp_just_set = false;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs. The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6058 Too many sg segments from "
					"NVME Transport. Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command. Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;

		/* for tracking the segment boundaries */
		j = 2;
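		/*
		 * NOTE: j counts hardware SGE slots consumed; it starts at 2
		 * because the cmd and rsp SGEs occupy the first two slots.
		 * When the next slot is the last entry of the current SGL
		 * buffer (and more data segments remain), that entry becomes
		 * an LSP (link) SGE chaining to an extra per-hdwq SGL buffer.
		 * An LSP slot carries no data segment, which is why the loop
		 * below rewinds i when one is emitted.
		 */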
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}

			sgl->word2 = 0;
			if (nseg == 1) {
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_ncmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_ncmd->seg_cnt = 0;
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));
				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
			      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(data_sg);
				dma_len = sg_dma_len(data_sg);
				sgl->addr_lo = cpu_to_le32(
						putPaddrLow(physaddr));
				sgl->addr_hi = cpu_to_le32(
						putPaddrHigh(physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				data_sg = sg_next(data_sg);

				sgl++;

				lsp_just_set = false;
			} else {
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}
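		/*
		 * NOTE: with exactly one data segment and cfg_enable_pbde
		 * set, the data BDE is embedded directly in WQE words 13-15
		 * (a PBDE), letting the adapter skip the separate SGL fetch
		 * for this IO.
		 */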
		/* PBDE support for first data SGE only */
		if (nseg == 1 && phba->cfg_enable_pbde) {
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

			/* Word 11 - set PBDE bit */
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			/* Word 11 - PBDE bit disabled by default template */
		}

	} else {
		lpfc_ncmd->seg_cnt = 0;

		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler. This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   negative errno on failure (-EBUSY, -EINVAL, -ENODEV, -ENOMEM)
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx, cpu;
	struct lpfc_nvme_lport *lport;
	struct lpfc_fc4_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
	uint64_t start = 0;
#if (IS_ENABLED(CONFIG_NVME_FC))
	u8 *uuid = NULL;
	int err;
	enum dma_data_direction iodir;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
	    phba->hba_flag & HBA_IOQ_FLUSH) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Busy IO, ndlp not ready: rport x%px "
				 "ndlp x%px, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->fc4_xpt_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;
	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}
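	/*
	 * NOTE: the expedite flag lets lpfc_get_nvme_buf() below dip into
	 * the reserved IO buffer pool, so a keep-alive can still be issued
	 * when the regular pool is exhausted.
	 */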
	/* Check if IO qualifies for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
	    pnvme_fcreq->payload_length) {
		ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
		if (ret) {
			ret = -EBUSY;
			goto out_fail;
		}

		/* Get start time for IO latency */
		start = ktime_get_ns();
	}
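	/*
	 * NOTE: lpfc_update_cmf_cmd() charges the read payload against the
	 * congestion-management bandwidth window; a nonzero return means
	 * the window is over-subscribed, so the IO is bounced with -EBUSY
	 * for the transport to retry later.
	 */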
	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail1;
		}
	}

	/* Lookup Hardware Queue index based on fcp_io_sched module parameter */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		idx = lpfc_queue_info->index;
	} else {
		cpu = raw_smp_processor_id();
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail1;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif
	lpfc_ncmd->rx_cmd_start = start;

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever. There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->qidx = lpfc_queue_info->qidx;

#if (IS_ENABLED(CONFIG_NVME_FC))
	/* check the necessary and sufficient condition to support VMID */
	if (lpfc_is_vmid_enabled(phba) &&
	    (ndlp->vmid_support ||
	     phba->pport->vmid_priority_tagging ==
	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
		/* If the I/O was generated by a VM, fetch the associated
		 * virtual entity id.
		 */
		uuid = nvme_fc_io_getuuid(pnvme_fcreq);

		if (uuid) {
			if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
				iodir = DMA_TO_DEVICE;
			else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
				iodir = DMA_FROM_DEVICE;
			else
				iodir = DMA_NONE;

			err = lpfc_vmid_get_appid(vport, uuid, iodir,
						  (union lpfc_vmid_io_tag *)
						  &lpfc_ncmd->cur_iocbq.vmid_tag);
			if (!err)
				lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
		}
	}
#endif
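	/*
	 * NOTE: on success lpfc_vmid_get_appid() fills vmid_tag with either
	 * a CS_CTL priority or an application ID; LPFC_IO_VMID then tells
	 * lpfc_nvme_prep_io_cmd() to stamp that tag into the WQE.
	 */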
	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine. The driver now is dependent on the IO queue steering from
	 * the transport. We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
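	/*
	 * NOTE: with XRI rebalancing enabled, the call above moves XRIs from
	 * the public pool back into this hdwq's private pool whenever it
	 * drops below its low watermark.
	 */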
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
		cpu = raw_smp_processor_id();
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
		lpfc_ncmd->cpu = cpu;
		if (idx != cpu)
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_NVME_IOERR,
					 "6702 CPU Check cmd: "
					 "cpu %d wq %d\n",
					 lpfc_ncmd->cpu,
					 lpfc_queue_info->index);
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			cstat->output_requests--;
		else
			cstat->input_requests--;
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail1:
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
			     pnvme_fcreq->payload_length, NULL);
 out_fail:
	return ret;
}
/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_iocbq *rspiocb)
{
	struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
			get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler. This
 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport. This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_io_buf *lpfc_nbuf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	unsigned long flags;
	int ret_val;
	struct nvme_fc_cmd_iu *cp;

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req x%px\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now. hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer. Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer. Skipping abort req.\n");
		return;
	}

	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/* Guard against IO completion being called at same time */
	spin_lock(&lpfc_nbuf->buf_lock);

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport. If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf x%px nvmeCmd x%px, "
				 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6142 NVME IO req x%px not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq x%px, "
				 "lpfc_ncmd x%px xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		goto out_unlock;
	}

	ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
					      lpfc_nvme_abort_fcreq_cmpl);
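	/*
	 * NOTE: on success the abort WQE completes through
	 * lpfc_nvme_abort_fcreq_cmpl() above; the aborted IO itself is
	 * completed separately once the ABTS exchange finishes.
	 */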
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	if (ret_val != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq x%px.\n",
				 ret_val, pnvme_fcreq);
		return;
	}

	/*
	 * Get Command Id from cmd to plug into response. This
	 * code is not needed in the next NVME Transport drop.
	 */
	cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
			 nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
			 cp->sqe.common.command_id);
	return;

out_unlock:
	spin_unlock(&lpfc_nbuf->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return;
}
/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req       = lpfc_nvme_ls_req,
	.fcp_io       = lpfc_nvme_fcp_io_submit,
	.ls_abort     = lpfc_nvme_ls_abort,
	.fcp_abort    = lpfc_nvme_fcp_abort,
	.xmt_ls_rsp   = lpfc_nvme_xmt_ls_rsp,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
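/*
 * NOTE: max_hw_queues and max_sgl_segments above are only defaults;
 * lpfc_nvme_create_localport() overwrites them from cfg_hdw_queue and
 * cfg_nvme_seg_cnt before registering with the transport.
 */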
/*
 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
 *
 * This routine removes a nvme buffer from head of @hdwq io_buf_list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;

	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);

	if (lpfc_ncmd) {
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Setup key fields in buffer that may have been changed
		 * if other protocols used this buffer.
		 */
		pwqeq->cmd_flag = LPFC_IO_NVME;
		pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
		lpfc_ncmd->start_time = jiffies;
		lpfc_ncmd->flags = 0;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		sgl = lpfc_ncmd->dma_sgl;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		/* Initialize 64 bytes only */
		memset(wqe, 0, sizeof(union lpfc_wqe));
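		/*
		 * NOTE: only the base 64-byte WQE is cleared here; the
		 * command-specific words are re-initialized from the
		 * iread/iwrite/icmnd templates in lpfc_nvme_prep_io_cmd()
		 * on every submit.
		 */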
		if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
			atomic_inc(&ndlp->cmd_pending);
			lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
		}

	} else {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
	}

	return lpfc_ncmd;
}
/**
 * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
 * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;

	qp = lpfc_ncmd->hdwq;
	if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_abts_io_buf_list);
		qp->abts_nvme_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport. It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time. Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - no heap memory available
 *   other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	/* Initialize this localport instance. The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3, one for cmd, one for rsp and one for this alignment
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;

	/* Advertise how many hw queues we support based on cfg_hdw_queue,
	 * which will not exceed cpu count.
	 */
	lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP x%px, private "
				 "x%px, sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);

		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);
	}

	return ret;
}
#if (IS_ENABLED(CONFIG_NVME_FC))
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources. Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{
	u32 wait_tmo;
	int ret, i, pending = 0;
	struct lpfc_sli_ring *pring;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli4_hdw_queue *qp;
	int abts_scsi, abts_nvme;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
		if (unlikely(!ret)) {
			pending = 0;
			abts_scsi = 0;
			abts_nvme = 0;
			for (i = 0; i < phba->cfg_hdw_queue; i++) {
				qp = &phba->sli4_hba.hdwq[i];
				if (!vport->localport || !qp || !qp->io_wq)
					return;

				pring = qp->io_wq->pring;
				if (!pring)
					continue;
				pending += pring->txcmplq_cnt;
				abts_scsi += qp->abts_scsi_io_bufs;
				abts_nvme += qp->abts_nvme_io_bufs;
			}
			if (!vport->localport ||
			    test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
			    vport->load_flag & FC_UNLOADING)
				return;

			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6176 Lport x%px Localport x%px wait "
					 "timed out. Pending %d [%d:%d]. "
					 "Renewing.\n",
					 lport, vport->localport, pending,
					 abts_scsi, abts_nvme);
			continue;
		}
		break;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport x%px Localport x%px Complete Success\n",
			 lport, vport->localport);
}
#endif
/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to a host virtual N_Port data structure
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there. This routine ensures all rports bound to the
 * lport have been disconnected.
 *
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int ret;
	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	if (!localport)
		return;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport x%px\n",
			 localport);

	/* lport's rport list is clear. Unregister
	 * lport and release resources.
	 */
	lport->lport_unreg_cmp = &lport_unreg_cmp;
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion. This either blocks
	 * indefinitely or succeeds
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
	vport->localport = NULL;

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support. All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP x%px, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport x%px did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport x%px to DID x%06x\n",
			 lport, localport->port_id);
#endif
}
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;
	struct fc_rport *srport = ndlp->rport;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance. Note, rpinfo->dev_loss_tmo
	 * is left 0 to indicate accept transport defaults. The
	 * driver communicates port role capabilities consistent
	 * with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	if (srport)
		rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
	else
		rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;

	spin_lock_irq(&ndlp->lock);

	/* If an oldrport exists, so does the ndlp reference. If not,
	 * a new reference is needed because either the node has never
	 * been registered or it has been unregistered and is being
	 * deleted.
	 */
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	if (oldrport) {
		prev_ndlp = oldrport->ndlp;
		spin_unlock_irq(&ndlp->lock);
	} else {
		spin_unlock_irq(&ndlp->lock);
		if (!lpfc_nlp_get(ndlp)) {
			dev_warn(&vport->phba->pcidev->dev,
				 "Warning - No node ref - exit register\n");
			return 0;
		}
	}

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport. Else this is a
		 * new rport.
		 */
		/* Guard against an unregister/reregister
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
		ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
		spin_unlock_irq(&ndlp->lock);
		rport = remote_port->private;
		if (oldrport) {
			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&ndlp->lock);
			ndlp->nrport = NULL;
			ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
			spin_unlock_irq(&ndlp->lock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* Reference only removed if previous NDLP is no longer
			 * active. It might be just a swap and removing the
			 * reference would cause a premature cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if (!prev_ndlp->nrport)
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Cleanly bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&ndlp->lock);
		ndlp->nrport = rport;
		spin_unlock_irq(&ndlp->lock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Bind lport x%px to remoteport x%px "
				 "rport x%px WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_TRACE_EVENT,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}
/*
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
 *
 * If the ndlp represents an NVME Target that we are logged into,
 * ping the NVME FC Transport layer to initiate a device rescan
 * on this remote NPort.
 */
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_nvme_rport *nrport;
	struct nvme_fc_remote_port *remoteport = NULL;

	spin_lock_irq(&ndlp->lock);
	nrport = lpfc_ndlp_get_nrport(ndlp);
	if (nrport)
		remoteport = nrport->remoteport;
	spin_unlock_irq(&ndlp->lock);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6170 Rescan NPort DID x%06x type x%x "
			 "state x%x nrport x%px remoteport x%px\n",
			 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
			 nrport, remoteport);

	if (!nrport || !remoteport)
		goto rescan_exit;

	/* Rescan an NVME target in MAPPED state with DISCOVERY role set */
	if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
	    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		nvme_fc_rescan_remoteport(remoteport);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6172 NVME rescanned DID x%06x "
				 "port_state x%x\n",
				 ndlp->nlp_DID, remoteport->port_state);
	}
	return;
 rescan_exit:
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6169 Skip NVME Rport Rescan, NVME remoteport "
			 "unregistered\n");
#endif
}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective. Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued. Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss. Node ref count
 * is only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is fundamental error. The localport is always
	 * available until driver unload. Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&ndlp->lock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&ndlp->lock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport x%px, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x "
			 "refcnt %d\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type, kref_read(&ndlp->kref));

	/* Sanity check ndlp type. Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */
	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
		spin_unlock_irq(&vport->phba->hbalock);

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return value is ignored. The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);

		/* The driver no longer knows if the nrport memory is valid
		 * because the controller teardown process has begun and
		 * is asynchronous. Break the binding in the ndlp. Also
		 * remove the register ndlp reference to setup node release.
		 */
		ndlp->nrport = NULL;
		lpfc_nlp_put(ndlp);
		if (ret != 0) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}
/**
 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
				   struct lpfc_io_buf *lpfc_ncmd)
{
	struct nvmefc_fcp_req *nvme_cmd = NULL;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6533 %s nvme_cmd %p tag x%x abort complete and "
			"xri released\n", __func__,
			lpfc_ncmd->nvmeCmd,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->transferred_length = 0;
		nvme_cmd->rcv_rsplen = 0;
		nvme_cmd->status = NVME_SC_INTERNAL;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri,
			   struct lpfc_io_buf *lpfc_ncmd)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

	if (ndlp)
		lpfc_sli4_abts_err_handler(phba, ndlp, axri);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
			"xri released\n",
			lpfc_ncmd->nvmeCmd, xri,
			lpfc_ncmd->cur_iocbq.iotag);

	/* Aborted NVME commands are required to not complete
	 * before the abort exchange command fully completes.
	 * Once completed, it is available via the put list.
	 */
	if (lpfc_ncmd->nvmeCmd) {
		nvme_cmd = lpfc_ncmd->nvmeCmd;
		nvme_cmd->done(nvme_cmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in txcmplq, they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
		return;

	/* Cycle through all IO rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		if (!phba->sli4_hba.hdwq[i].io_wq)
			continue;
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;

		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep is 10 ms. Every ten seconds,
			 * dump a message. Something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}

	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);
}
void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct lpfc_io_buf *lpfc_ncmd;
	struct nvmefc_fcp_req *nCmd;
	struct lpfc_wcqe_complete wcqe;
	struct lpfc_wcqe_complete *wcqep = &wcqe;

	lpfc_ncmd = pwqeIn->io_buf;
	if (!lpfc_ncmd) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}
	/* For abort iocb just return, IO iocb will do a done call */
	if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
	    CMD_ABORT_XRI_CX) {
		lpfc_sli_release_iocbq(phba, pwqeIn);
		return;
	}

	spin_lock(&lpfc_ncmd->buf_lock);
	nCmd = lpfc_ncmd->nvmeCmd;
	if (!nCmd) {
		spin_unlock(&lpfc_ncmd->buf_lock);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	spin_unlock(&lpfc_ncmd->buf_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
			"6194 NVME Cancel xri %x\n",
			lpfc_ncmd->cur_iocbq.sli4_xritag);

	wcqep->word0 = 0;
	bf_set(lpfc_wcqe_c_status, wcqep, stat);
	wcqep->parameter = param;
	wcqep->total_data_placed = 0;
	wcqep->word3 = 0; /* xb is 0 */

	/* Call release with XB=1 to queue the IO into the abort list. */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		bf_set(lpfc_wcqe_c_xb, wcqep, 1);
	memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
	(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}